""" A script that processes the Qualitivity XML files and creates CSV files of extracted data. """ import argparse import os import sys from xml.etree import ElementTree import numpy as np import pandas as pd # data frame columns columns = ['Record ID', 'Segment ID', 'Total pause duration_300', 'Pause count_300', 'Total pause duration_500', 'Pause count_500', 'Total pause duration_1s', 'Pause count_1s', 'Keystrokes', 'Active ms', 'Record duration', 'Total duration'] # date time format used in the XML DATE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f' def normalize_attribute(root): """ Make all the attributes lower case since the source XML files are not consistent. """ for attr, value in root.attrib.items(): norm_attr = attr.lower() if norm_attr != attr: root.set(norm_attr, value) root.attrib.pop(attr) for child in root: normalize_attribute(child) def create_pause_counts_dict(): """ Dictionary that will hold our pause count and duration value for a <Record/> element in the XML.""" return { 'duration_300': 0, 'count_300': 0, 'duration_500': 0, 'count_500': 0, 'duration_1000': 0, 'count_1000': 0, 'total_pause_ms': 0, 'total_duration': 0 } def ms(val): """ Turn a float value into milliseconds as an integer. """ return int(val * 1000) def categorize_pause(counts, pause_ms): """ The method that updates the count and duration values. :param counts: the dict that holds our pause count and duration values. :param pause_ms: the pause in milliseconds :return: None. """ if pause_ms >= 300: counts['duration_300'] += pause_ms counts['count_300'] += 1 if pause_ms >= 500: counts['duration_500'] += pause_ms counts['count_500'] += 1 if pause_ms >= 1000: counts['duration_1000'] += pause_ms counts['count_1000'] += 1 counts['total_duration'] += pause_ms def valid_keystroke(keystroke): """ Are we dealing with a valid keystroke? False if its a 'system' keystroke. """ if keystroke.attrib['origin'] and keystroke.attrib['system'] and not keystroke.attrib['key']: return False elif not keystroke.attrib['selection'] and not keystroke.attrib['text'] and not keystroke.attrib['key'] and \ keystroke.attrib['shift'] == 'False' and keystroke.attrib['ctrl'] == 'False' \ and keystroke.attrib['alt'] == 'False': return False else: return True def process_file(xml_input): """ The method that updates the count and duration values. :param xml_input: the XML file to be processes. :return: a pandas data frame of data extracted from the xml. """ # empty data structure for the data categorized_data = [] # keep track of all pauses all_pauses_data = [] if not os.path.isfile(xml_input): raise ValueError('{} is not a file'.format(xml_input)) # parse the document and get the root element doc_tree = ElementTree.parse(xml_input) root = doc_tree.getroot() # make attributes lower case - source XML not consistent normalize_attribute(root) # find all the <Record/> elements records = root.findall('.//Document/Record') # go through the records, each will be a row in the CVS file for record in records: # get the date/time that the record data started record_started = record.attrib['started'] record_started_dt = np.datetime64(record_started) # get the date/time that the record data stopped record_ended = record.attrib['stopped'] record_ended_dt = np.datetime64(record_ended) # calculate the duration of the work on the record in milliseconds duration_dt = record_ended_dt - record_started_dt duration_ms = duration_dt.astype(int) # we track 'milestones', i.e. 
where the last operation ended last_milestone = record_started_dt # values we want from the <Record/> attribute record_id = record.attrib['id'] segment_id = record.attrib['segmentid'] active_ms = record.attrib['activemiliseconds'] # calculate pauses pause_counts = create_pause_counts_dict() # get all the keystrokes for a record keystrokes = record.findall('.//ks') # count all the keystrokes keystrokes_count = len(keystrokes) valid_keystroke_count = 0 if keystrokes_count == 0: categorize_pause(pause_counts, duration_ms) all_pauses_data.append([record_id, segment_id, duration_ms, 'No ks']) elif keystrokes_count == 1 and not valid_keystroke(keystrokes[0]): categorize_pause(pause_counts, duration_ms) all_pauses_data.append([record_id, segment_id, duration_ms, '1 system ks omitted']) keystrokes_count = 0 else: # iterate over the keystrokes to calculate pauses for ks in keystrokes: # filter out 'system' keystrokes if valid_keystroke(ks): # keep track of valid keystrokes valid_keystroke_count += 1 created = ks.attrib['created'] created_dt = np.datetime64(created) diff = created_dt - last_milestone diff_ms = diff.astype(int) last_milestone = created_dt # categorise categorize_pause(pause_counts, diff_ms) # not categorised, for the audit all_pauses_data.append([record_id, segment_id, diff_ms, '']) else: all_pauses_data.append([record_id, segment_id, None, 'Omitted ks']) if valid_keystroke_count > 0: # calculate the pause between the last keystroke and when the record stopped. last_pause_dt = record_ended_dt - last_milestone last_pause_ms = last_pause_dt.astype(int) categorize_pause(pause_counts, last_pause_ms) all_pauses_data.append([record_id, segment_id, last_pause_ms, '']) keystrokes_count = valid_keystroke_count # create a row of data row = [record_id, segment_id, pause_counts['duration_300'], pause_counts['count_300'], pause_counts['duration_500'], pause_counts['count_500'], pause_counts['duration_1000'], pause_counts['count_1000'], keystrokes_count, active_ms, duration_ms, pause_counts['total_duration']] # append to 2d array categorized_data.append(row) # create pandas data frames df = pd.DataFrame(data=categorized_data, columns=columns) all_df = pd.DataFrame(data=all_pauses_data, columns=['Record ID', 'Segment ID', 'Pause durations', 'Notes']) return df, all_df def process(input_dir, output_dir, combine): """ Process a folder of XML files and create a folder of CSV file or single file with the combined results. :param input_dir: input directory with the source XML files. :param output_dir output directory to save the CSV file. :param combine boolean, (True) to combine the results, and (False) to create separate CSV files for each XML files. :return: a pandas data frame of data extracted from the xml. """ # holds data frames if we are combining # into a single output file combine_df = [] all_data_combined_df = [] omitted_combined_df = [] # check we have an input folder if not os.path.isdir(input_dir): print('Input is not a folder. Exiting') sys.exit(1) # check we have an output folder if not os.path.isdir(output_dir): print('Output is not a folder, creating it.') os.makedirs(output_dir) # walk the directory looking for files for root, dirs, files in os.walk(input_dir): # iterate the files for file in files: # we are interested in xml files if file.endswith('.xml'): # process the file and create a data frame input_file = os.path.join(root, file) df, all_df = process_file(input_file) # if we are combining, we want the filename in the data (first column). 
# add the data frame to our temporary array if combine: df.insert(0, 'File', file) all_df.insert(0, 'File', file) combine_df.append(df) all_data_combined_df.append(all_df) else: # not combining, so create a CSV file for each xml file output_file = os.path.join(output_dir, file.replace('.xml', '.csv')) all_output_file = os.path.join(output_dir, file.replace('.xml', '-audit.csv')) df.to_csv(output_file, index=False) all_df.to_csv(all_output_file, index=False) # if we are combining, combine output into two files if combine: df = pd.concat(combine_df, ignore_index=True) df.to_csv(os.path.join(output_dir, 'combined.csv'), index=False) all_df = pd.concat(all_data_combined_df, ignore_index=True) all_df.to_csv(os.path.join(output_dir, 'combined-audit.csv'), index=False) if __name__ == "__main__": """ Main method that will get arguments on the command line. """ # define the command line parameters and switches parser = argparse.ArgumentParser(description='Process Qualitivity XML files.') parser.add_argument('input', type=str, help='folder with the source XML files') parser.add_argument('output', type=str, help='folder for the output CSV files') parser.add_argument('--combine', required=False, action='store_true', help='Combine the output into a single CSV file') # parse and process args = parser.parse_args() process(args.input, args.output, args.combine)
python
# -*- coding: utf-8 -*-
import cv2
import pytesseract
from tkinter import Tk
from tkinter import filedialog

# Path to the Tesseract binary (a raw string only needs single backslashes).
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'

# Let the user pick an image file through a native file dialog.
root = Tk()
root.filename = filedialog.askopenfilename(
    initialdir="/",
    title="Select file",
    filetypes=(("jpeg files", "*.jpg"), ("all files", "*.*")),
)
print(root.filename)

# Read the selected image and run OCR on it.
img = cv2.imread(root.filename)
text = pytesseract.image_to_string(img)
print(text)
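# Optional preprocessing sketch (not part of the original script): grayscale plus
# Otsu thresholding before OCR often improves accuracy on scanned documents.
# The function name is illustrative, not an existing API.
def ocr_with_preprocessing(image_path):
    gray = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return pytesseract.image_to_string(binary)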
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sun Feb 14 18:00:19 2021 @author: dipu """ from rico import * from utils import * from moka import * from datasets import * from scipy.optimize import linear_sum_assignment import os import time import sys import shutil import random from time import strftime import argparse import numpy as np import torch import torch.utils.data from config_rico import add_eval_args #from data import PartNetDataset, Tree from rico import Hierarchy from datasets import RicoFlatDataset, RicoHierDataset import utils import time from utils import mkdir_if_missing from scipy.spatial.distance import cdist def vis_fn(q_uxid, q_o, r1_id, r1_o, r2_id, r2_o, r3_id, r3_o, r4_id, r4_o, r5_id, r5_o): return dict( q_id = q_uxid, query = q_o.to_string(render='html', labeled=True), query_layout = q_o.plot(), rank1_id = r1_id, rank1 = r1_o.to_string(render='html', labeled=r1_o.is_labeled), rank1_layout = r1_o.plot(), rank2_id = r2_id, rank2 = r2_o.to_string(render='html', labeled=r2_o.is_labeled), rank2_layout = r2_o.plot(), rank3_id = r3_id, rank3 = r3_o.to_string(render='html', labeled=r3_o.is_labeled), rank3_layout = r3_o.plot(), rank4_id = r4_id, rank4 = r4_o.to_string(render='html', labeled=r4_o.is_labeled), rank4_layout = r4_o.plot(), rank5_id = r5_id, rank5 = r5_o.to_string(render='html', labeled=r5_o.is_labeled), rank5_layout = r5_o.plot() ) def test_vis_fn(q_uxid, q_o, r1_id, r1_o, r2_id, r2_o, r3_id, r3_o, r4_id, r4_o, r5_id, r5_o): aa = [q_uxid, q_o, r1_id, r1_o, r2_id, r2_o, r3_id, r3_o, r4_id, r4_o, r5_id, r5_o] return aa def extract_features(conf, dataset, encoder): device = torch.device(conf.device) with torch.no_grad(): objects = [] for i, (uxid, o_gt) in enumerate(tqdm(dataset)): o_gt = o_gt.to(device) root_code = encoder.encode_structure(obj=o_gt) if not conf.non_variational: z, obj_kldiv_loss = torch.chunk(root_code, 2, 1) else: z = root_code z = z.detach().cpu().numpy() objects.append([uxid, o_gt, z]) return objects def main(): parser = argparse.ArgumentParser() parser = add_eval_args(parser) eval_conf = parser.parse_args() # Write here settings for debuging eval_conf.category = 'rico' eval_conf.exp_name = 'rico_hier_exp_AE_sem_wt_1_nnemb' eval_conf.semantics = 'rico_plus' eval_conf.test_dataset = '/home/dipu/dipu_ps/codes/UIGeneration/prj-ux-layout-copy/codes/scripts/rico_gen_data/rico_mtn_50_geq2_mcpn_10_V2/train_uxid.txt' eval_conf.model_epoch = None eval_conf.num_gen = 100 eval_conf.web_dir = './www' eval_conf.semantic_representation = 'nn_embedding' eval_conf.device = 'cuda:3' # load train config conf = torch.load(os.path.join(eval_conf.model_path, eval_conf.exp_name, 'conf.pth')) eval_conf.data_path = conf.data_path # merge training and evaluation configurations, giving evaluation parameters precendence conf.__dict__.update(eval_conf.__dict__) # load object category information if conf.semantics: Hierarchy.set_semantics(conf.semantics) if conf.extract_hier: assert conf.semantics == 'rico_plus' # load model models = utils.get_model_module(conf.model_version) # set up device device = torch.device(conf.device) print(f'Using device: {conf.device}') # check if eval results already exist. If so, delete it. # if os.path.exists(os.path.join(conf.result_path, conf.exp_name)): # response = input('Eval results for "%s" already exists, overwrite? 
(y/n) ' % (conf.exp_name)) # if response != 'y': # sys.exit() # shutil.rmtree(os.path.join(conf.result_path, conf.exp_name)) # create a new directory to store eval results # result_dir = os.path.join(conf.result_path, conf.exp_name) # mkdir_if_missing() # os.makedirs(os.path.join(conf.result_path, conf.exp_name)) # result_dir = os.path.join(conf.result_path, conf.exp_name) # create models encoder = models.RecursiveEncoder(conf, variational=True, probabilistic=not conf.non_variational) decoder = models.RecursiveDecoder(conf) models = [encoder, decoder] model_names = ['encoder', 'decoder'] print('\n\n') #print(f'non_probabilistic: {conf.non_probabilistic}') print(f'non_variational: {conf.non_variational}') # load pretrained model __ = utils.load_checkpoint( models=models, model_names=model_names, dirname=os.path.join(conf.model_path, conf.exp_name), epoch=conf.model_epoch, strict=True) # send to device for m in models: m.to(device) # set models to evaluation mode for m in models: m.eval() # create dataset and data loader data_features = ['uxid', 'object'] DatasetClass = globals()[conf.DatasetClass] print('Using dataset:', DatasetClass) test_dataset = DatasetClass(conf.data_path, conf.test_dataset, ['uxid', 'object'], is_train=False, permute=False, n_permutes=1) #dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, collate_fn=lambda x: list(zip(*x))) # visualize(P, conf, conf.exp_name, test_dataset, encoder, decoder, result_dir, conf.web_dir, show=False) feats_objects = extract_features(conf, test_dataset, encoder) feats = np.concatenate([x[-1] for x in feats_objects]) uxids = [x[0] for x in feats_objects] hiers = [x[1] for x in feats_objects] uxid2hier = dict((k,v) for k,v in zip(uxids, hiers)) distances = cdist(feats, feats, metric= 'euclidean') sort_inds = np.argsort(distances) sample_retrievals = [] for ii in range(100): q_uxid = uxids[ii] ranked_uxids = [] ranked_hiers = [] for yy in sort_inds[ii,:5]: ranked_uxids.append(uxids[yy]) ranked_hiers.append(uxid2hier[uxids[yy]]) # ranked_uxids = [uxids[yy] for yy in sort_inds[ii,:5]] # ranked_hiers = [uxid2hier[id] for id in ranked_uxids ] ranked = [None] * (len(ranked_uxids) + len(ranked_hiers)) ranked[::2] = ranked_uxids ranked[1::2] = ranked_hiers sample_retrievals.append([q_uxid, uxid2hier[q_uxid]] + ranked) visualize_retrieved_images(conf, sample_retrievals, web_dir = 'www', show=False ) def visualize_retrieved_images(conf, sample_retrievals, web_dir='www', show=False, refresh=False): split = 'train' if 'train' in conf.test_dataset else 'val' if conf.model_epoch is None: html = HTML(f'/retrieval_{split}@{conf.exp_name}', conf.exp_name, base_url=web_dir, inverted=True, overwrite=True, refresh=int(refresh)) else: html = HTML(f'/retrieval_{split}@{conf.exp_name}_epoch_{conf.model_epoch}', conf.expname, base_url=web_dir, inverted=True, overwrite=True, refresh=int(refresh)) html.add_table().add([vis_fn(*_) for _ in tqdm(sample_retrievals)]) html.save() domain = conf.domain if hasattr(conf, 'domain') else None if show: html.show(domain) #else: P.print(html.url(domain)) if __name__ == '__main__': main()
python
import numpy as np
import os
import cv2


def make_image_noisy(image, noise_typ):
    if noise_typ == "gauss":
        # Additive gaussian noise, clipped back into the valid 8-bit range.
        row, col, ch = image.shape
        mean = 0
        var = 40
        sigma = var**0.5
        gauss = np.random.normal(mean, sigma, (row, col, ch))
        gauss = gauss.reshape((row, col, ch))
        noisy_image = image + gauss
        return noisy_image.clip(0, 255)
    elif noise_typ == "zero":
        amount = 0.05  # percentage of zero pixels
        out = np.copy(image)
        num_zeros = np.ceil(amount * image.shape[0] * image.shape[1])
        # Index with a tuple of arrays; indexing with a plain list of arrays is
        # deprecated/invalid in recent numpy versions.
        coords = tuple(np.random.randint(0, i - 1, int(num_zeros)) for i in image.shape[:2])
        out[:, :, 0][coords] = 0
        out[:, :, 1][coords] = 0
        out[:, :, 2][coords] = 0
        return out.astype(np.uint8)
    elif noise_typ == "s&p":
        raise RuntimeError("Test it properly before using!")
        row, col, ch = image.shape
        s_vs_p = 0.5
        amount = 0.004
        out = np.copy(image)
        # Salt mode
        num_salt = np.ceil(amount * image.size * s_vs_p)
        coords = tuple(np.random.randint(0, i - 1, int(num_salt)) for i in image.shape)
        out[coords] = 1
        # Pepper mode
        num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
        coords = tuple(np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape)
        out[coords] = 0
        return out
    elif noise_typ == "poisson":
        raise RuntimeError("Test it properly before using!")
        vals = len(np.unique(image))
        vals = 2 ** np.ceil(np.log2(vals))
        noisy_image = np.random.poisson(image * vals) / float(vals)
        return noisy_image
    elif noise_typ == "speckle":
        raise RuntimeError("Test it properly before using!")
        row, col, ch = image.shape
        gauss = np.random.randn(row, col, ch)
        gauss = gauss.reshape((row, col, ch))
        noisy_image = image + image * gauss
        return noisy_image
    else:
        raise RuntimeError(f"Unknown noise_typ: {noise_typ}")
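# Minimal usage sketch for make_image_noisy(); "input.jpg" is a placeholder path,
# and the uint8 cast assumes the gaussian branch, which returns a float array.
if __name__ == "__main__":
    image = cv2.imread("input.jpg")
    noisy = make_image_noisy(image, "gauss")
    cv2.imwrite("noisy.jpg", noisy.astype(np.uint8))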
python
# -*- coding: utf-8 -*-
from django.apps import AppConfig
import urllib, requests, json
from timetable.models import Course
from ngram import NGram


class SearchConfig(AppConfig):
    name = 'curso'


class SearchOb(object):
    """Fuzzy course search backed by a MongoDB collection and an n-gram index."""

    def __init__(self, uri=None):
        from pymongo import MongoClient
        self.client = MongoClient(uri)
        self.db = self.client['timetable']
        self.SrchCollect = self.db['CourseSearch']
        # Build an n-gram index over all stored keywords for fuzzy matching.
        self.cursoNgram = NGram((i['key'] for i in self.SrchCollect.find({}, {'key': 1, '_id': False})))

    def search(self, keyword, school):
        cursor = self.SrchCollect.find({'key': keyword}, {school: 1, '_id': False}).limit(1)
        if cursor.count() > 0:
            pass
        else:
            # No exact hit: fall back to the closest n-gram match, if any.
            keyword = self.cursoNgram.find(keyword)
            if keyword:
                cursor = self.SrchCollect.find({'key': keyword}, {school: 1, '_id': False}).limit(1)
            else:
                return []
        return cursor[0][school]
python
#!/usr/bin/env python
"""The setup script."""
# find_packages() is only provided by setuptools (distutils.core has no such
# function), so it is imported from setuptools unconditionally.
from setuptools import find_packages, setup

setup(name='hyperscan-python',
      version='0.1',
      description='Simple Python bindings for the Hyperscan project.',
      author='Andreas Moser',
      author_email='[email protected]',
      license='Apache License, Version 2.0',
      packages=find_packages('.', exclude=['tests']))
python
def test_geoadd(judge_command):
    judge_command(
        'GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"',
        {
            "command": "GEOADD",
            "key": "Sicily",
            "longitude": "15.087269",
            "latitude": "37.502669",
            "member": '"Catania"',
        },
    )


def test_georadiusbymember(judge_command):
    judge_command(
        "GEORADIUSBYMEMBER Sicily Agrigento 100 km",
        {
            "command": "GEORADIUSBYMEMBER",
            "key": "Sicily",
            "member": "Agrigento",
            "float": "100",
            "distunit": "km",
        },
    )


def test_georadius(judge_command):
    judge_command(
        "GEORADIUS Sicily 15 37 200 km WITHDIST WITHCOORD ",
        {
            "command": "GEORADIUS",
            "key": "Sicily",
            "longitude": "15",
            "latitude": "37",
            "float": "200",
            "distunit": "km",
            "geochoice": "WITHCOORD",
        },
    )
python
import jpype
jpype.startJVM()
from asposecells.api import Workbook, PdfSaveOptions, ImageOrPrintOptions, SheetRender
import cv2
import numpy as np

DEBUG_MODE = False


def excel2imgs(excel_path):
    """Render every worksheet of an Excel file to an OpenCV image."""
    workbook = Workbook(excel_path)

    # Excel to PDF
    # pdfOptions = PdfSaveOptions()
    # pdfOptions.setOnePagePerSheet(True)
    # workbook.save("../test_images/example.pdf", pdfOptions)

    imgOptions = ImageOrPrintOptions()
    imgOptions.setHorizontalResolution(300)
    imgOptions.setVerticalResolution(300)
    imgOptions.setCellAutoFit(True)
    imgOptions.setOnePagePerSheet(True)

    img_datasets = []
    sheet_count = workbook.getWorksheets().getCount()
    for i in range(sheet_count):
        sheet = workbook.getWorksheets().get(i)
        sr = SheetRender(sheet, imgOptions)
        # Render the sheet to image bytes and decode them into a BGR numpy array.
        imgbytes_content = sr.toImageBytes(0)
        img = cv2.imdecode(np.frombuffer(imgbytes_content, np.uint8), cv2.IMREAD_COLOR)
        img_datasets.append(img)
        if DEBUG_MODE:
            cv2.imwrite("../test_results/Excel2Image/bytes2cvimg_" + str(i) + ".png", img)
        # sr.toImage(i, "../test_results/Excel2Image/excel2img_" + str(i) + ".png")

    # jpype.shutdownJVM()
    return img_datasets, sheet_count


###############################
if __name__ == "__main__":
    excel_path = "/home/elimen/Data/helloflask/FlaskTutorial/rewrite.xls"
    img_datasets, sheet_count = excel2imgs(excel_path)
    print(" Number of images: {}".format(len(img_datasets)))
    print(" Type of image: {}".format(type(img_datasets[0])))
python
# -*- coding: UTF-8 -*- import cv2 as cv import os import argparse import numpy as np import pandas as pd import time from utils import choose_run_mode, load_pretrain_model, set_video_writer from Pose.pose_visualizer import TfPoseVisualizer from Action.recognizer import load_action_premodel, framewise_recognize parser = argparse.ArgumentParser(description='Action Recognition by OpenPose') parser.add_argument( '-img', '--image', required="True", help='Path to image folder.') args = parser.parse_args() # imported related models estimator = load_pretrain_model('VGG_origin') action_classifier = load_action_premodel('Action/framewise_recognition.h5') # parameter initialization realtime_fps = '0.0000' start_time = time.time() fps_interval = 1 fps_count = 0 run_timer = 0 frame_count = 0 folder_path = args.image # create df for saving joints columns = ["nose_x", "nose_y", "neck_x", "neck_y", "Rshoulder_x", "Rshoulder_y", "Relbow_x", "Relbow_y", "Rwrist_x", "RWrist_y", "LShoulder_x", "LShoulder_y", "LElbow_x", "LElbow_y", "LWrist_x", "LWrist_y", "RHip_x", "RHip_y", "RKnee_x", "RKnee_y", "RAnkle_x", "RAnkle_y", "LHip_x", "LHip_y", "LKnee_x", "LKnee_y", "LAnkle_x", "LAnkle_y", "REye_x", "REye_y", "LEye_x", "LEye_y", "REar_x", "REar_y", "LEar_x", "LEar_y", "class"] df = pd.DataFrame(columns=columns) for f_name in os.listdir(folder_path): sub_f = folder_path + "/" + f_name # folder_out = "test_out" + "/" + f_name print("f_name: " + f_name) # if not os.path.isdir(folder_out): # os.mkdir(folder_out) for img in os.listdir(sub_f): print("image name: " + img) show = cv.imread(sub_f + "/" + img) fps_count += 1 frame_count += 1 # pose estimation humans = estimator.inference(show) # print(len(humans)) # print(humans[0].uidx_list) # print(humans[0].body_parts) # get pose info pose = TfPoseVisualizer.draw_pose_rgb(show, humans) # return frame, joints, bboxes, xcenter # recognize the action framewise show = framewise_recognize(pose, action_classifier) # height, width = show.shape[:2] # # Display real-time FPS values # if (time.time() - start_time) > fps_interval: # # 计算这个interval过程中的帧数,若interval为1秒,则为FPS # # Calculate the number of frames in this interval. If the interval is 1 second, it is FPS. 
# realtime_fps = fps_count / (time.time() - start_time) # fps_count = 0 # Clear the number of frames # start_time = time.time() # fps_label = 'FPS:{0:.2f}'.format(realtime_fps) # cv.putText(show, fps_label, (width-160, 25), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3) # # Show the number of people detected # num_label = "Human: {0}".format(len(humans)) # cv.putText(show, num_label, (5, height-45), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3) # # Show current run time and total frames # if frame_count == 1: # run_timer = time.time() # run_time = time.time() - run_timer # time_frame_label = '[Time:{0:.2f} | Frame:{1}]'.format(run_time, frame_count) # cv.putText(show, time_frame_label, (5, height-15), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3) # cv.imshow('Action Recognition based on OpenPose', show) # img_out = img.split(".")[0] + "_out_" + ".png" # cv.imwrite(folder_out + "/" + img, show) # video_writer.write(show) # # Collect data for training process (for training) joints_norm_per_frame = np.array(pose[-1]).astype(np.str) # print("length of joints frames: " + str(len(joints_norm_per_frame))) # only select joints_norm_per_frame with 1 human if len(joints_norm_per_frame) == 36: row = np.append(joints_norm_per_frame, f_name) series = pd.Series(dict(zip(df.columns, row))) df = df.append(series, ignore_index=True) # saving df to csv df.to_csv("Action/training/human_keypoint.csv", index=False)
python
from typing import Optional, List

from reconbot.notificationprinters.embedformat import EmbedFormat


class NotificationFormat(object):
    def __init__(self, content: Optional[str], embeds: Optional[List[EmbedFormat]] = None):
        self.content = content
        if embeds is None:
            self.embeds = []
        else:
            self.embeds = embeds
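# Illustrative construction of NotificationFormat (assumes the class above is in scope).
# A text-only notification: embeds defaults to an empty list.
plain = NotificationFormat("Structure under attack")
assert plain.content == "Structure under attack"
assert plain.embeds == []

# Passing an explicit embed list; elements would normally be EmbedFormat instances.
rich = NotificationFormat(None, embeds=[])
assert rich.content is None and rich.embeds == []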
python
from typing import List, Dict, Union from sse_starlette.sse import EventSourceResponse from fastapi import Depends, FastAPI, Request from fastapi_users import FastAPIUsers, BaseUserManager from fastapi_users.authentication import JWTAuthentication from sqlalchemy.orm import Session from . import crud, schemas from .argo import get_argo_router from .database import SessionLocal from .adapter import SQLAlchemyORMUserDatabase from .schemas import User, UserCreate, UserUpdate, UserDB from .utils import incident_event_generator db_session = SessionLocal() SECRET = "OpenSOAR@11042018" auth_backends = [] jwt_authentication = JWTAuthentication( secret=SECRET, lifetime_seconds=3600, tokenUrl="auth/jwt/login" ) auth_backends.append(jwt_authentication) class UserManager(BaseUserManager[UserCreate, UserDB]): user_db_model = UserDB reset_password_token_secret = SECRET verification_token_secret = SECRET def get_user_db(): yield SQLAlchemyORMUserDatabase(UserDB, db_session) def get_user_manager(user_db=Depends(get_user_db)): yield UserManager(user_db) fastapi_users = FastAPIUsers( get_user_manager, auth_backends, User, UserCreate, UserUpdate, UserDB, ) app = FastAPI(root_path="/api") def get_db(): db = SessionLocal() try: yield db finally: db.close() @app.get("/") def read_root(): return {} app.include_router( fastapi_users.get_auth_router(jwt_authentication), prefix="/auth/jwt", tags=["auth"] ) app.include_router( fastapi_users.get_register_router(), prefix="/auth", tags=["auth"], ) app.include_router( fastapi_users.get_users_router(), prefix="/users", tags=["users"], ) app.include_router( get_argo_router(fastapi_users), prefix="/argo", ) @app.get("/users", response_model=List[User]) def read_users( db: Session = Depends(get_db), user: User = Depends(fastapi_users.current_user(active=True)), ): return crud.read_users(db) @app.get("/incidents", response_model=Dict[str, Union[List[schemas.IncidentRead], int]]) def read_incidents( skip: int = 0, limit: int = 10, query_filter: str = None, db: Session = Depends(get_db), user: User = Depends(fastapi_users.current_user(active=True)), ): return crud.get_incidents(db, skip=skip, limit=limit, query_filter=query_filter) @app.post("/incidents", response_model=schemas.Incident) def create_incident( incident: schemas.IncidentCreate, db: Session = Depends(get_db), user: User = Depends(fastapi_users.current_user(active=True)), ): return crud.create_incident(db, incident) @app.get("/incidents/stream") async def read_incidents_from_stream( request: Request, db: Session = Depends(get_db), user: User = Depends(fastapi_users.current_user(active=True)), ): incident_generator = incident_event_generator(db, request) return EventSourceResponse(incident_generator)
python
# -*- coding: UTF-8 -*-
# Copyright 2015-2020 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)

"""Same as :mod:`lino_book.projects.noi1e`, but using :ref:`react` as front end.

This uses :ref:`hosting.multiple_frontends`.

.. autosummary::
   :toctree:

   settings
   tests

"""
python
from mongoengine import signals __author__ = 'Enis Simsar' import json import re import threading from datetime import datetime from decouple import config from tweepy import OAuthHandler from tweepy import Stream from tweepy.streaming import StreamListener from models.Tweet import Tweet from models.Topic import Topic def get_info(topic_dic): keywords = [] topics = [] lang = [] for key in topic_dic: topic = topic_dic[key] topics = topics + [topic['id']] keywords = keywords + topic['keywords'] lang = lang + topic['languages'] lang = list(set(lang)) lang = [str(l) for l in lang] keywords = list(set(keywords)) keywords = [str(keyword) for keyword in keywords] result = { 'topics': sorted(topics), 'keywords': keywords, 'lang': lang } return result def create_tweet(topic_id, tweet): topic = Topic.objects.get(id=topic_id) tweet_obj = Tweet() tweet_obj.topic_id = topic.id tweet_obj.published_at = datetime.fromtimestamp(int(tweet['timestamp_ms']) / 1e3) tweet_obj.entry = tweet tweet_obj.save() topic.last_tweet_at = datetime.now topic.save() def separates_tweet(topic_dic, tweet): for key in topic_dic: topic = topic_dic[key] if tweet['lang'] in topic['languages']: for keyword in topic['keywords']: keyword = re.compile(keyword.replace(" ", "(.?)"), re.IGNORECASE) if 'extended_tweet' in tweet and 'full_text' in tweet['extended_tweet']: if re.search(keyword, str(tweet['extended_tweet']['full_text'])): create_tweet(key, tweet) break else: if re.search(keyword, str(tweet['text'])): create_tweet(key, tweet) break # Accessing Twitter API consumer_key = config("TWITTER_CONSUMER_KEY") # API key consumer_secret = config("TWITTER_CONSUMER_SECRET") # API secret access_token = config("TWITTER_ACCESS_TOKEN") access_secret = config("TWITTER_ACCESS_SECRET") # This is a basic listener that just prints received tweets to stdout. class StdOutListener(StreamListener): def __init__(self, topic_dic): self.topic_dic = topic_dic self.terminate = False self.connection = True super(StdOutListener, self).__init__() def on_data(self, data): if not self.terminate: tweet = json.loads(data) separates_tweet(self.topic_dic, tweet) return True else: return False def on_disconnect(self, notice): self.connection = False return True def on_error(self, status): print(status) if status == 420: return False def stop(self): self.terminate = True def on_timeout(self): return True # To continue listening class StreamCreator(): def __init__(self, topic_dic): # This handles Twitter authetification and the connection to Twitter Streaming API self.l = StdOutListener(topic_dic) signals.post_save.connect(Tweet.post_save, sender=Tweet) self.info = get_info(topic_dic=topic_dic) self.keywords = self.info['keywords'] self.lang = self.info['lang'] self.topics = self.info['topics'] print(self.topics) print(self.keywords) print(self.lang) self.auth = OAuthHandler(consumer_key, consumer_secret) self.auth.set_access_token(access_token, access_secret) self.stream = Stream(self.auth, self.l) self.t = threading.Thread(target=self.stream.filter, kwargs={'track': self.keywords, 'languages': self.lang, 'stall_warnings': True}) def start(self): self.t.deamon = True self.t.start() def terminate(self): self.l.running = False self.l.stop() self.l.terminate = True
python
#!/usr/bin/env python3 import argparse, os """ Trenco Module for arguments """ def txn_args(parser): parser.add_argument('--annotation-file', dest = 'annotfname', default = '', help="Genode annotations file in gtf format (overwrites --annotation-version and --organism") parser.add_argument('--annotation-version', dest="annotations", default="vM4", help="The Gencode annotations file in gtf format. (Default: vM4) (WARNING: All entries are indexed to this version)") parser.add_argument('--organism', default="mouse", help="Organism gencode to download (Default: mouse)" ) parser.add_argument('-b', '--biotypes', help="The biotypes to get transcript TSS. (default: protein)", nargs='+', default=['protein_coding']) def enh_bound_args(parser, tot = True): if tot: parser.add_argument('-t', '--tss', help="The Gencode TSS file.", required=True) parser.add_argument('-s', '--sizes', help="The chrome sizes file or genome number (ie mm10)", required=True) parser.add_argument('-p', '--peaks', help="The full path to peak files in bed format", nargs='+', required=True) #parser.add_argument('--geneGTF', # help="GTF file of genes from Gencode (Default gene_txn.gtf from get_trancript script)", # default = "gene_txn.gtf") parser.add_argument('-r', '--region', help="The number of bases pairs to exclude around TSS (Default: 2500)", type=int, default=2500) parser.add_argument('-q', '--promoter-range', help="Range of numbers before TSS and after TSS to consider as Promoter (Default: 1000-200)", type=str, default="1000-200") parser.add_argument('-d', '--distance', help="The number of bases pairs to merge between adjacent peaks (Default: 150)", type=int, default=150) def merge_txn_args(parser): parser.add_argument('-e', '--expression', help="The full path to peak files in tsv format", nargs='+', required=True) def merge_enh_args(parser): parser.add_argument('-e', '--enhancers', help="The universe of enhancer files.", required=True) parser.add_argument("-t", "--enhMarks", dest='target', type=str, default="H3K27ac", help="Mark for enchancers: Default H3K27ac") parser.add_argument('-a', '--alignments', help="The full path to sorted alignment files in bam format.", nargs='+', required=True) def full_trenco_args(parser): path = '/'.join(os.path.realpath(__file__).split('/')[:-2]) parser.add_argument("--design", type=str, required=True, help="Design file containing link information to samples.") parser.add_argument("--alignment", nargs='+', required=True, help="Full path to ChIP alingment files in bam format") parser.add_argument("--expression", nargs='+', required=True, help="Full path to transcript expression table in tsv format") parser.add_argument("--enhMarks", dest='target', type=str, default="H3K27ac", help="Mark for enchancers: Default H3K27ac") parser.add_argument("--tadBED", type=str, default="%s/common_files/TAD_positions_mm10_sorted.bed" % path, help="TAD file: Default - mm10 TAD in common_files") def tf_mtx_args(parser, spec = True): parser.add_argument("--meme-db", type=str, default="cis-bp", help="MEME database to use (Default: cis-bp)") parser.add_argument("--db", type=str, help="Motif database name if different from species (ex JASPER CORE 2014 for jasper)") if spec: parser.add_argument('-s', '--species', dest='refID', nargs='+', required=True, help = "Scientific name of organism (can use these names for ease: human, mouse, fly, worm)") parser.add_argument('-g', '--genome-version', dest='gvers', type=str, help = "Version of genome to use. 
Default is newest") parser.add_argument('--bed', dest='bed', type=str, help = "ChIP and Promoter bed file for getting motifs (ex enh.bed,promoter.bed)") def enh_gene_nw_args(parser): parser.add_argument("-e", "--enh", help="enhancer by samples log2 TPM quantification matrix", type=str) parser.add_argument("-g", "--gene", help="gene by samples log2 TPM quantification matrix", type=str) parser.add_argument("-ta", "--tadBED", help='sorted tad annotation in bed file format', type=str) parser.add_argument("-ga", "--geneBED", help='gene annotation in bed file format', type=str) parser.add_argument("-ea", "--enhBED", help='enh annotation in bed file format') parser.add_argument("-s", "--sample", help='sample to construct the network', type=str) parser.add_argument("-o", "--output", help="output directory", type=str) parser.add_argument("-p", "--threads", help="Threads", type=int, default=30) def tis_gene_networks(parser): parser.add_argument("-d", "--dir", help="directory containing the output of get_enh_gene_networks.py", type=str) parser.add_argument("-s", "--sample", help='sample to construct the network', type=str) parser.add_argument("-p", "--threads", help='Number of threads to use', type=int, default=30) parser.add_argument("-x1", "--matrix1", help='TF by enchancer matrix file path', type=str) parser.add_argument("-x2", "--matrix2", help="TF by gene promoter matrix file path", type=str) parser.add_argument("-v", "--vector", help="Expression vector for the sample from RNA-seq", type=str)
python
#!/usr/bin/env python3 import argparse import json import sys from datetime import datetime from time import sleep from splinter import Browser from tvlist_loader import xlparser from tvlist_loader import scraper from tvlist_loader import projects_parser as pp def main(): # Parse cli arguments parser = argparse.ArgumentParser() parser.add_argument("FILE", help="Файл программы передач в формате Excel") parser.add_argument( "-s", "--sheet", help="Имя листа с программой передач. По умолчанию 'Лист1'") parser.add_argument("-a", "--auth", help="Файл с адресом сайта, логином и паролем в формате JSON") parser.add_argument("-b", "--browser", help="Браузер, который будет использоваться для открывания ссылок. Доступные значения 'firefox' (по умолчанию), 'chrome'.") parser.add_argument("-H", "--headless", action="store_true", default=False, help="Запустить браузер без графического интерфейса.") args = vars(parser.parse_args()) # Set sheet to read if args["sheet"]: sheet = args["sheet"] else: sheet = "Лист1" if args["auth"]: file_client = args["auth"] else: file_client = "client_id.json" try: with open(file_client, "r") as file_json: client = json.load(file_json) except FileNotFoundError: print(f"Не удалось открыть {file_client}. Поместите файл 'client_id.json' в папку запуска программы или укажите другой файл с помощью параметра -a") sys.exit(1) except json.decoder.JSONDecodeError: print(f"Файл {file_client} не является корректным JSON.") sys.exit(1) if args["browser"] == "firefox" or args["browser"] == "chrome": browse_with = args["browser"] else: browse_with = "firefox" site = client['site'] table = xlparser.get_table(args["FILE"], sheet) week = xlparser.get_dates(table) with Browser(browse_with, headless=args["headless"]) as browser: projects = pp.get_projects(browser, site) for day, value in week.items(): week[day]["programs"] = xlparser.get_program(table, value["id"], projects) with open("schedule.json", "w", encoding="utf-8") as file_json: json.dump(week, file_json, indent=4, ensure_ascii=False) scraper.login(browser, site, client['login'], client['password']) scraper.open_schedule(browser, site) for days in week.values(): scraper.add_day(browser, days["day"], days["date"]) for programs in days["programs"].values(): scraper.add_program( browser, programs["name"], programs["time"], programs["age"], programs["project"], programs["project_name"]) scraper.commit(browser) if __name__ == '__main__': main()
python
import matplotlib.pyplot as plt import numpy as np import os import seaborn as sns import shutil # =========== HYPERPARAMETERS ========== UNIVARIATE_DISTRIBUTIONS = ['chi_square_9', 'exp_9'] NUM_SAMPLES = 20000 NUM_TRIALS = 5 # ========== OUTPUT DIRECTORIES ========== OUTPUT_DIR = 'examples/power_analyses/univariate_output/' MODELS_OUTPUT_DIR = OUTPUT_DIR + 'MODELS/' SYN_DATA_OUTPUT_DIR = OUTPUT_DIR + 'SYN_DATA/' REAL_DATA_OUTPUT_DIR = OUTPUT_DIR + 'REAL_DATA/' POWER_OUTPUT_DIR = OUTPUT_DIR + 'POWER/' RESULTS_DIR = 'RESULTS/' # shutil.rmtree(OUTPUT_DIR, ignore_errors=True) # os.makedirs(MODELS_OUTPUT_DIR) # os.makedirs(SYN_DATA_OUTPUT_DIR) # os.makedirs(REAL_DATA_OUTPUT_DIR) # os.makedirs(POWER_OUTPUT_DIR) os.makedirs(RESULTS_DIR) # ========== RUN PIPELINE ========== def generate_real_cmd(dist, num_samples, output_dir): return 'python3 sample_prob_dist.py {0} {1} {2}/'.format(dist, num_samples, output_dir) def train_gan_cmd(real_data_dir, output_dir): return 'python3 train_prob_gan.py {0}data.npy {1}'.format(real_data_dir, output_dir) def generate_syn_cmd(gen_dir, num_samples, output_dir): return 'python3 generate_prob_gan.py {0}generator {1} {2}'.format(gen_dir, num_samples, output_dir) def power_analysis_cmd(real_data_1_dir, real_data_2_dir, syn_data_1_dir, syn_data_2_dir, output_dir): return 'python3 univariate_power_analysis.py {0}data.npy {1}data.npy {2}data.npy {3}data.npy {4}'.format(real_data_1_dir, syn_data_1_dir, real_data_2_dir, syn_data_2_dir, output_dir) def output_dirs(dist, k): model_tag_base = '[{0}]_[k={1}]'.format(dist, k) model_1_tag = model_tag_base + '_[v=1]' model_2_tag = model_tag_base + '_[v=2]' real_data_1_dir = '{0}{1}/'.format(REAL_DATA_OUTPUT_DIR, model_1_tag) real_data_2_dir = '{0}{1}/'.format(REAL_DATA_OUTPUT_DIR, model_2_tag) model_1_dir = '{0}{1}/'.format(MODELS_OUTPUT_DIR, model_1_tag) model_2_dir = '{0}{1}/'.format(MODELS_OUTPUT_DIR, model_2_tag) syn_data_1_dir = '{0}{1}/'.format(SYN_DATA_OUTPUT_DIR, model_1_tag) syn_data_2_dir = '{0}{1}/'.format(SYN_DATA_OUTPUT_DIR, model_2_tag) return real_data_1_dir, real_data_2_dir, model_1_dir, model_2_dir, syn_data_1_dir, syn_data_2_dir def run_cmd_sequence(cmds): for cmd in cmds: os.system(cmd) def generate_real_data_samples(): for i in range(len(UNIVARIATE_DISTRIBUTIONS)): for k in range(NUM_TRIALS): dist_i = UNIVARIATE_DISTRIBUTIONS[i] real_data_1_dir, real_data_2_dir, _, _, _, _ = output_dirs(dist_i, k) sample_real_1 = generate_real_cmd(dist_i, NUM_SAMPLES, real_data_1_dir) sample_real_2 = generate_real_cmd(dist_i, NUM_SAMPLES, real_data_2_dir) run_cmd_sequence([sample_real_1, sample_real_2]) def train_gans(): for i in range(len(UNIVARIATE_DISTRIBUTIONS)): for k in range(NUM_TRIALS): dist_i = UNIVARIATE_DISTRIBUTIONS[i] real_data_1_dir, real_data_2_dir, model_1_dir, model_2_dir, _, _ = output_dirs(dist_i, k) train_gan_1 = train_gan_cmd(real_data_1_dir, model_1_dir) train_gan_2 = train_gan_cmd(real_data_2_dir, model_2_dir) run_cmd_sequence([train_gan_1, train_gan_2]) def generate_syn_data_samples(): for i in range(len(UNIVARIATE_DISTRIBUTIONS)): for k in range(NUM_TRIALS): dist_i = UNIVARIATE_DISTRIBUTIONS[i] _, _, model_1_dir, model_2_dir, syn_data_1_dir, syn_data_2_dir = output_dirs(dist_i, k) sample_syn_1 = generate_syn_cmd(model_1_dir, NUM_SAMPLES, syn_data_1_dir) sample_syn_2 = generate_syn_cmd(model_2_dir, NUM_SAMPLES, syn_data_2_dir) run_cmd_sequence([sample_syn_1, sample_syn_2]) def run_power_analyses(): for i in range(len(UNIVARIATE_DISTRIBUTIONS)): for j in range(i, len(UNIVARIATE_DISTRIBUTIONS)): for k 
in range(NUM_TRIALS): dist_i = UNIVARIATE_DISTRIBUTIONS[i] dist_j = UNIVARIATE_DISTRIBUTIONS[j] real_data_1_dir_i, real_data_2_dir_i, _, _, syn_data_1_dir_i, syn_data_2_dir_i = output_dirs(dist_i, k) real_data_1_dir_j, real_data_2_dir_j, _, _, syn_data_1_dir_j, syn_data_2_dir_j = output_dirs(dist_j, k) output_dir = '{0}[{1}_VS_{2}]_[k={3}]/'.format(POWER_OUTPUT_DIR, dist_i, dist_j, k) cmd = power_analysis_cmd(real_data_1_dir_i, real_data_2_dir_j, syn_data_1_dir_i, syn_data_2_dir_j, output_dir) run_cmd_sequence([cmd]) def visualize(): for i in range(len(UNIVARIATE_DISTRIBUTIONS)): for j in range(i, len(UNIVARIATE_DISTRIBUTIONS)): figure, axes = plt.subplots(nrows=1, ncols=1) n = None t_test_real_power = [] mmd_test_real_power = [] t_test_syn_power = [] mmd_test_syn_power = [] for k in range(NUM_TRIALS): dist_i = UNIVARIATE_DISTRIBUTIONS[i] dist_j = UNIVARIATE_DISTRIBUTIONS[j] power_dir_k = '{0}[{1}_VS_{2}]_[k={3}]/'.format(POWER_OUTPUT_DIR, dist_i, dist_j, k) if n is None: n = np.load(power_dir_k+'n.npy') t_test_real_power.append(np.load(power_dir_k+'t_test_real_power.npy')) mmd_test_real_power.append(np.load(power_dir_k+'mmd_test_real_power.npy')) t_test_syn_power.append(np.load(power_dir_k+'t_test_syn_power.npy')) mmd_test_syn_power.append(np.load(power_dir_k+'mmd_test_syn_power.npy')) n = np.array(n) t_test_real_power = np.array(t_test_real_power) mmd_test_real_power = np.array(mmd_test_real_power) t_test_syn_power = np.array(t_test_syn_power) mmd_test_syn_power = np.array(mmd_test_syn_power) # Plot curve of n vs power # sns.tsplot(data=t_test_real_power, time=n, ci=[68, 95], color='blue', condition='Real', ax=axes[0]) # sns.tsplot(data=t_test_syn_power, time=n, ci=[68, 95], color='orange', condition='Synthetic', ax=axes[0]) # axes[0].set_title('Sample Size vs T Test Power') # axes[0].set_xlabel('Sample Size') # axes[0].set_ylabel('Power') # axes[0].set_ylim([-0.1, 1.1]) # axes[0].legend(loc="upper right") sns.tsplot(data=mmd_test_real_power, time=n, ci=[68, 95], color='blue', condition='Real', ax=axes) sns.tsplot(data=mmd_test_syn_power, time=n, ci=[68, 95], color='orange', condition='Synthetic', ax=axes) axes.set_title('Sample Size vs MMD Test Power') axes.set_xlabel('Sample Size') axes.set_ylabel('Power') axes.set_ylim([-0.1, 1.1]) axes.legend(loc="upper right") # Save results figure.tight_layout() figure.savefig('{0}{1}_VS_{2}'.format(RESULTS_DIR, dist_i, dist_j), format='eps') # ========== MAIN ========== # generate_real_data_samples() # train_gans() # generate_syn_data_samples() # run_power_analyses() visualize()
python
import scrapy import re from locations.items import GeojsonPointItem DAY_MAPPING = { "Mon": "Mo", "Tues": "Tu", "Wed": "We", "Thur": "Th", "Fri": "Fr", "Sat": "Sa", "Sun": "Su" } class KoppsSpider(scrapy.Spider): name = "kopps" item_attributes = { 'brand': "Kopps" } allowed_domains = ["www.kopps.com"] download_delay = 1.5 start_urls = ( 'https://www.kopps.com/', ) def parse_day(self, day): if re.search('-', day): days = day.split('-') osm_days = [] if len(days) == 2: for day in days: osm_day = DAY_MAPPING[day.strip()] osm_days.append(osm_day) return "-".join(osm_days) def parse_times(self, times): if times.strip() == 'Open 24 hours': return '24/7' hours_to = [x.strip() for x in times.split('-')] cleaned_times = [] for hour in hours_to: if re.search('pm$', hour): hour = re.sub('pm', '', hour).strip() hour_min = hour.split(":") if int(hour_min[0]) < 12: hour_min[0] = str(12 + int(hour_min[0])) cleaned_times.append(":".join(hour_min)) if re.search('am$', hour): hour = re.sub('am', '', hour).strip() hour_min = hour.split(":") if len(hour_min[0]) <2: hour_min[0] = hour_min[0].zfill(2) else: hour_min[0] = str( int(hour_min[0])) cleaned_times.append(":".join(hour_min)) return "-".join(cleaned_times) def parse_hours(self, lis): hours = [] for li in lis: day_times = li.xpath('normalize-space(./text())').extract_first() day = re.findall(r"^[a-zA-Z-]+" , day_times) if(len(day)>0): day = day[0] else: day = 'Mon-Sun' times = re.findall(r"[0-9]{2}:[0-9]{2}[a|p]m - [0-9]{2}:[0-9]{2}[a|p]m" ,day_times) times = times[0] if times and day: parsed_time = self.parse_times(times) parsed_day = self.parse_day(day) hours.append(parsed_day + ' ' + parsed_time) return "; ".join(hours) def parse(self, response): locations = response.xpath('//div[@id="locations"]/div/div') for location in locations: properties = { 'addr_full': location.xpath('normalize-space(./div/address/a/text())').extract_first(), 'phone': location.xpath('normalize-space(./div/ul/li/span/a/text())').extract_first(), 'city': location.xpath('./div/address/a/text()').extract()[1].replace(' ' ,'').split(',')[0].replace('\r\n' ,''), 'state': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[1], 'postcode': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[2].replace('\r\n' ,''), 'ref': location.xpath('normalize-space(./div/address/a/@href)').extract_first(), 'website': response.url, 'lat':re.findall(r"\/[0-9]{2}[^(\/)]+z",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[0], 'lon': re.findall(r"\/[0-9]{2}[^(\/)]+z",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[1], } hours = self.parse_hours(location.xpath('./div/ul/li[3]/span')) if hours: properties['opening_hours'] = hours yield GeojsonPointItem(**properties)
python
import smtplib
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email import encoders

from user import User
from mail import Mail


class ImportantUser(User):
    '''
    ImportantUser inherits from the User class and is a more fully featured version of it.
    It lets the user add an attachment to a mail, paired with a signature image (e.g. a logo).

    Input:
        sender, password, smtp_name - str
        smtp_port - int
        attachment_name, signature_img_name - str file name including the extension
        attachment_path, signature_img_path - str absolute path to the folder holding the file
    '''

    def __init__(self, sender, password, smtp_name, smtp_port,
                 signature_img_name, signature_img_path,
                 attachment_name, attachment_path):
        super().__init__(sender, password, smtp_name, smtp_port)
        self.signature_img_name = signature_img_name
        self.signature_img_path = signature_img_path
        self.attachment_name = attachment_name
        self.attachment_path = attachment_path

    def create_signature_image_object(self):
        # Read the signature image and reference it by Content-ID so the HTML body
        # can embed it with <img src="cid:signature_image">.
        with open(self.signature_img_path + self.signature_img_name, 'rb') as img:
            sgn_image = MIMEImage(img.read())
        sgn_image.add_header('Content-ID', '<signature_image>')
        return sgn_image

    def create_attachment_object(self):
        # Wrap the attachment as a base64-encoded application/octet-stream part.
        with open(self.attachment_path + self.attachment_name, 'rb') as binary:
            payload = MIMEBase('application', 'octet-stream', Name=self.attachment_name)
            payload.set_payload(binary.read())
        encoders.encode_base64(payload)
        payload.add_header('Content-Disposition', 'attachment', filename=self.attachment_name)
        return payload

    def send_mail_with_attachment(self, receiver, subject, body):
        '''
        Note that in the body the signature image is referenced by the tag
        <img src="cid:signature_image">, which has to sit at the end of the HTML body.

        The method calls the other class methods to create the image and payload
        objects used in the mail: the image is the signature image, the payload is
        any attachment to the mail.
        '''
        attachment_mail = Mail(self, receiver, subject, body)
        image = self.create_signature_image_object()
        payload = self.create_attachment_object()
        attachment_mail.message.attach(image)
        attachment_mail.message.attach(payload)
        attachment_mail.create_session()
        attachment_mail.attach_message()
        attachment_mail.send_mail()
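# Hypothetical end-to-end usage; every credential, host, and path below is a
# placeholder, and the User/Mail classes are assumed to come from the surrounding project.
if __name__ == "__main__":
    important_user = ImportantUser(
        sender="[email protected]",
        password="app-password",
        smtp_name="smtp.example.com",
        smtp_port=587,
        signature_img_name="logo.png",
        signature_img_path="/home/me/images/",
        attachment_name="report.pdf",
        attachment_path="/home/me/documents/",
    )
    html_body = '<p>Hi, the report is attached.</p><img src="cid:signature_image">'
    important_user.send_mail_with_attachment("[email protected]", "Monthly report", html_body)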
python
/Users/NikhilArora/anaconda3/lib/python3.6/imp.py
python
# coding: utf-8
"""Everything related to parsing tracker responses"""
import urlparse

from lxml import etree


class BaseParser(object):
    """Abstract base class for tracker response parsers"""

    def parse_index(self, html):
        """Parse index html and return a list of dicts"""
        raise NotImplementedError()

    def parse_torrent_page(self, html):
        """Parse a torrent page and return a dict"""
        raise NotImplementedError()


def btih_from_href(url):
    """Extract the infohash from a magnet link"""
    parsed = urlparse.urlparse(url)
    params = urlparse.parse_qs(parsed.query)
    xt = params['xt'][0]
    # Strip the leading "urn:btih:" prefix (9 characters).
    return xt[9:]


def make_tree(html):
    """Build an lxml element tree from html"""
    htmlparser = etree.HTMLParser(encoding='utf-8')
    return etree.fromstring(html, parser=htmlparser)


class Error(RuntimeError):
    """Parse error"""
    pass
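# Illustrative concrete parser (not part of the original module): the XPath
# expressions assume a made-up tracker page layout; real trackers will need
# their own selectors.
class ExampleTrackerParser(BaseParser):

    def parse_index(self, html):
        tree = make_tree(html)
        return [
            {'title': link.text, 'btih': btih_from_href(link.get('href'))}
            for link in tree.xpath('//a[starts-with(@href, "magnet:")]')
        ]

    def parse_torrent_page(self, html):
        tree = make_tree(html)
        magnets = tree.xpath('//a[starts-with(@href, "magnet:")]/@href')
        if not magnets:
            raise Error('no magnet link found')
        return {'btih': btih_from_href(magnets[0])}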
python
from compas.datastructures import Network


def test_add_vertex():
    network = Network()
    assert network.add_vertex() == 0
    assert network.add_vertex(x=0, y=0, z=0) == 1
    assert network.add_vertex(key=2) == 2
    assert network.add_vertex(key=0, x=1) == 0
python
#!/usr/bin/python """ %prog [options] pair_1.fastq pair_2.fastq filter reads from paired fastq so that no unmatching reads remain. output files are pair_1.fastq.trim and pair_2.fastq.trim see: http://hackmap.blogspot.com/2010/09/filtering-paired-end-reads-high.html """ __version__ = "0.1.0" from subprocess import Popen, PIPE import sys FASTX_CLIPPER="fastx_clipper" FASTQ_QUALITY_TRIMMER="fastq_quality_trimmer" def gen_pairs(fha, fhb, min_len, fastq): def gen_headers(fastq): fq = open(fastq) r = fq.readline().rstrip("\r\n") while r: fq.readline() fq.readline() fq.readline() yield r[:-2] r = fq.readline().rstrip("\r\n") aread, bread = fha.readline, fhb.readline get_a = lambda: [aread().rstrip("\r\n") for i in range(4)] get_b = lambda: [bread().rstrip("\r\n") for i in range(4)] ah, bh = None, None header_gen = gen_headers(fastq) for header in header_gen: a = get_a() ah = a[0][:-2] b = get_b() bh = b[0][:-2] while not header in (ah, bh): header = header_gen.next() if bh != header: while ah != bh and ah: a = get_a() ah = a[0][:-2] while header != bh: header = header_gen.next() if ah != header: while ah != bh and bh: b = get_b() bh = b[0][:-2] while header != bh: header = header_gen.next() if not ah and bh: raise StopIteration assert ah == bh if len(a[1]) < min_len or len(b[1]) < min_len: continue yield a, b def main(adaptors, M, t, min_len, fastqs, sanger=False): cmds = [] for fastq in fastqs: cmd = [] for i, a in enumerate(adaptors): if M == 0: matches = len(a) else: matches = min(M, len(a)) cmd.append("%s -a %s -M %i %s -l 0" \ % (FASTX_CLIPPER, a, matches, "-Q 33" if sanger else "")) #, min_len)) trim_cmd = "%s -t %i -l 0" % (FASTQ_QUALITY_TRIMMER, t) #, min_len) if sanger: trim_cmd += " -Q 33" cmd.append(trim_cmd) cmd[0] += " < %s" % fastq cmds.append(" | ".join(cmd)) print "[running]:", cmds[-1] procs = [Popen(cmd, stdout=PIPE, shell=True) for cmd in cmds] trima = open("%s.trim" % fastqs[0], 'w') trimb = open("%s.trim" % fastqs[1], 'w') print >>sys.stderr, "writing %s and %s" % (trima.name, trimb.name) # no temporary file, just read from stdouts. for ra, rb in gen_pairs(procs[0].stdout, procs[1].stdout, min_len, fastqs[0]): print >>trima, "\n".join(ra) print >>trimb, "\n".join(rb) returncode = 0 for p in procs: p.wait() returncode |= p.returncode if returncode != 0: print >>sys.stderr, "ERROR: non-zero returncode from fastx toolkit" sys.exit(returncode) if __name__ == "__main__": import optparse p = optparse.OptionParser(__doc__) p.add_option("-a", dest="a", help="adaptor sequence to clip seperate multiples with ','", default="") p.add_option("-M", dest="M", help="require minimum adapter alignment length of N." " If less than N nucleotides aligned with the adapter - don't clip it." " default 0 means to require the full length of the adaptor to match. ", default=0, type='int') p.add_option("-t", dest="t", help="Quality threshold - nucleotides with lower" " quality will be trimmed (from the end of the sequence ", type='int', default=0) p.add_option("-l", dest="l", help="Minimum length - sequences shorter than this (after trimming)" "will be discarded. Default = 0 = no minimum length.", type="int", default=0) p.add_option("--sanger", dest="sanger", help="quality scores are ascii 33 sanger encoded (default is 64)", action="store_true") opts, fastqs = p.parse_args() fastqs[-1] = fastqs[-1].rstrip() if not (fastqs and len(fastqs)) == 2: sys.exit(p.print_help()) adaptors = [ad.strip() for ad in opts.a.split(",") if ad.strip()] main(adaptors, opts.M, opts.t, opts.l, fastqs, opts.sanger)
python
#!/usr/bin/env python3
#
# debris.db -- database-related operations for debris

import sqlite3
import time

from . import common
from .common import run_process
from .common import getconfig
from .common import log


class DebrisDB(object):
    """Represents the database connection.

    We are using sqlite3 as the db.
    """

    conn = None

    def __init__(self, dbpath: str = None):
        """Init the DebrisDB object.

        By default, the dbpath is taken from the loaded config.
        """
        if dbpath:
            my_dbpath = dbpath
        else:
            my_dbpath = getconfig('DEBRIS_DB_FILE')
        log.debug('connecting sqlite db: {}'.format(my_dbpath))
        self.conn = sqlite3.connect(my_dbpath)
        self._sanity_check()
        # TODO: Complete me

    def _sanity_check(self):
        """Run a sanity check.

        If there are any missing tables, create them.
        """
        c = self.conn.cursor()
        c.execute('CREATE TABLE IF NOT EXISTS `builtpkg` ('
                  '`package` TEXT NOT NULL, `version` TEXT NOT NULL);')
        c.execute('CREATE TABLE IF NOT EXISTS `command_history` ('
                  '`timestamp` INTEGER NOT NULL, `CMDTYPE` TEXT NOT NULL, `OPERATION` TEXT);')
        c.execute('CREATE TABLE IF NOT EXISTS `build_history` ('
                  '`timestamp` INTEGER NOT NULL, `package` TEXT NOT NULL, '
                  '`version` TEXT NOT NULL, `status` INTEGER NOT NULL, '
                  '`stdout` BLOB, `stderr` BLOB);')
        # TODO: recheck this

    def get_builtlist(self) -> list:
        """Retrieve the list of previously built packages.

        :example::

            [{'package': 'nixnote2', 'version': '2.0~beta9-1'},
             {'package': 'qevercloud', 'version': '3.0.3+ds-1'}]
        """
        builtlist = []
        c = self.conn.cursor()
        result = c.execute('SELECT `package`, `version` FROM `builtpkg`;').fetchall()
        for i in result:
            builtlist.append(dict(package=i[0], version=i[1]))
        return builtlist

    def log_transaction(
            self,
            package: str,
            version: str,
            status: bool,
            stdout: bytes = None,
            stderr: bytes = None,
    ):
        """Log one building attempt into the database."""
        log.debug('logging build attempt...')
        _current_time = int(time.time())
        c = self.conn.cursor()
        c.execute('INSERT INTO `build_history` '
                  '(`timestamp`, `package`, `version`, `status`, `stdout`, `stderr`) '
                  'VALUES (?, ?, ?, ?, ?, ?)',
                  (_current_time, package, version, int(status), stdout, stderr))
        self.conn.commit()
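# Hypothetical usage sketch: an explicit throwaway db path is passed so the
# config lookup (getconfig) is not needed; the package/version values are invented,
# and running this still requires the debris package context for the relative imports.
if __name__ == "__main__":
    db = DebrisDB(dbpath='/tmp/debris-example.sqlite3')
    db.log_transaction(
        package='nixnote2',
        version='2.0~beta9-1',
        status=True,
        stdout=b'build ok',
        stderr=b'',
    )
    # get_builtlist() reads the separate `builtpkg` table, so it stays empty
    # until that table is populated elsewhere.
    print(db.get_builtlist())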
python
#AUTOGENERATED BY NBDEV! DO NOT EDIT! __all__ = ["index", "modules", "custom_doc_links", "git_url"] index = {"extract_tag": "om2.ipynb", "contains_tag": "om2.ipynb", "is_nbx": "om2.ipynb", "is_nbx_cell": "om2.ipynb", "is_magic_or_shell": "om2.ipynb", "": "om2.ipynb", "strip": "om2.ipynb", "parse_xarg": "om2.ipynb", "get_imports_from_src": "om.ipynb", "Import": "om.ipynb", "create_import_statement": "om.ipynb", "extract_imports_from": "om.ipynb", "Bunch": "om2.ipynb", "load_nb": "om2.ipynb", "parse_src": "om.ipynb", "parse_nbx_cell": "om.ipynb", "concat": "om2.ipynb", "unzip": "om2.ipynb", "negate": "om2.ipynb", "is_constarg": "om2.ipynb", "get_item": "om2.ipynb", "get_items": "om2.ipynb", "not_constarg": "om2.ipynb", "parse_nb": "om.ipynb", "get_arrays": "om2.ipynb", "init_job": "om2.ipynb", "cont_job": "om2.ipynb", "chain_jobs": "om2.ipynb", "check_parsed_nb": "om.ipynb", "NbxBundle": "om.ipynb", "BUNDLE_SUMMARY": "om.ipynb", "regex_tag": "om2.ipynb", "regex_magic": "om2.ipynb", "parse_xarg_expr": "om2.ipynb", "regex_xarg": "om2.ipynb", "parse_src_with_parse_dict": "om2.ipynb", "parse_none": "om2.ipynb", "parse_nbx": "om2.ipynb", "parse_xuse": "om2.ipynb", "consume_line_below": "om2.ipynb", "parse_nbx_cell_with_parse_dict": "om2.ipynb", "PARSE_DICT": "om2.ipynb", "parse_nb_with_parse_dict": "om2.ipynb", "get_arrays_2": "om2.ipynb", "chain_jobs_2": "om2.ipynb", "add_if_necessary": "om2.ipynb", "create_script": "om2.ipynb", "create_om_files": "om2.ipynb", "create_folders": "om2.ipynb", "create_run_and_job_script": "om2.ipynb", "create_job_script": "om2.ipynb", "check_nb": "om2.ipynb", "create_experiment_script": "om2.ipynb", "create_raw_experiment": "om2.ipynb", "tpath": "om2.ipynb", "INSTRUCTIONS": "om2.ipynb", "Axis": "pspace.ipynb", "ParameterSpace": "pspace.ipynb", "get_templ_args": "templ.ipynb", "render_templ": "templ.ipynb", "create_file_from_template": "templ.ipynb", "render_template_from_string": "templ.ipynb"} modules = ["om.py", "om2.py", "pspace.py", "templ.py"] git_url = "https://github.com/mirkoklukas/nbx/tree/master/" def custom_doc_links(name): return None
python
#Создай собственный Шутер! from pygame import * from random import randint from time import time as timer mixer.init() mixer.music.load('Fonk.ogg') mixer.music.play(-1) mixer.music.set_volume(0.2) fire_sound = mixer.Sound('blaster.ogg') fire_sound.set_volume(0.1) font.init() font1 = font.SysFont('Arial',80) win = font1.render('YOU WIN!!!', True,(255,255,255)) lose = font1.render('YOU LOSE!!!', True,(255,0,0)) font2 = font.SysFont('Arial',36) img_back = 'galaxy.jpg' img_hero = 'rrocket.png' img_enemy = 'ufo.png' img_bullet = 'bullet.png' img_rocket = 'oruzhie.png' img_kunai = 'kunai.png' img_ast = 'asteroid.png' score = 0 goal = 20 lost = 0 max_lost = 10 life = 3 class GameSprite(sprite.Sprite): def __init__(self,player_image,player_x,player_y,size_x,size_y,player_speed): sprite.Sprite.__init__(self) self.image = transform.scale(image.load(player_image), (size_x,size_y)) self.speed = player_speed self.rect = self.image.get_rect() self.rect.x = player_x self.rect.y = player_y def reset(self): window.blit(self.image, (self.rect.x, self.rect.y)) class Player(GameSprite): def update(self): keys = key.get_pressed() if keys[K_LEFT] and self.rect.x > 5: self.rect.x-=self.speed if keys[K_RIGHT] and self.rect.x < win_width - 80: self.rect.x+=self.speed def fire(self): bullet = Bullet(img_bullet,self.rect.centerx,self.rect.top,15, 20, -15) bullets.add(bullet) def kunai(self): kunai = Bullet(img_kunai,self.rect.centerx,self.rect.top,15, 20, -15) bullets.add(kunai) def rocket(self): rocket = Bullet(img_rocket,self.rect.centerx,self.rect.top,15, 20, -15) bullets.add(rocket) class Bullet(GameSprite): def update (self): self.rect.y += self.speed if self.rect.y < 0: self.kill() class Enemy(GameSprite): def update(self): self.rect.y += self.speed global lost if self.rect.y > win_heigh: self.rect.x = randint(80, win_width-80) self.rect.y = 0 lost = lost + 1 win_width = 700 win_heigh = 500 window = display.set_mode((win_width, win_heigh)) display.set_caption("Shooter") backgroun = transform.scale(image.load(img_back), (win_width,win_heigh)) ship = Player(img_hero,5,win_heigh-100,80,100,17) monsters = sprite.Group() bullets = sprite.Group() asteroids = sprite.Group() for i in range(1,6): monster = Enemy(img_enemy, randint(80, win_width - 80), -40, 80, 50, randint(3,5)) monsters.add(monster) for i in range(1,3): asteroid = Enemy(img_ast, randint(30, win_width - 30), -40, 80, 50, randint(3,5)) asteroids.add(asteroid) finish = False run = True game = True rel_time = False num_fire = 0 while game: for e in event.get(): if e.type == QUIT: game = False elif e.type == KEYDOWN: if e.key == K_SPACE: if num_fire < 5 and rel_time == False: num_fire = num_fire + 1 fire_sound.play() ship.fire() if num_fire >= 5 and rel_time == False: last_time = timer() rel_time = True elif e.key == K_TAB: if num_fire < 5 and rel_time == False: num_fire = num_fire + 1 fire_sound.play() ship.kunai() elif e.key == K_LCTRL: if num_fire < 5 and rel_time == False: num_fire = num_fire + 1 fire_sound.play() ship.rocket() if not finish: window.blit(backgroun,(0,0)) ship.reset() ship.update() monsters.update() monsters.draw(window) bullets.update() bullets.draw(window) asteroids.update() asteroids.draw(window) if rel_time == True: now_time = timer() if now_time - last_time < 3: reload = font2.render('Wait, reload...', 1, (150,0,0)) window.blit(reload, (260, 460)) else: num_fire = 0 rel_time = False collides = sprite.groupcollide(monsters,bullets, True, True) for c in collides: score = score + 1 monster = Enemy(img_enemy, randint(80, win_width - 
80), -40, 80, 50, randint(2,4)) monsters.add(monster) if sprite.spritecollide(ship, monsters, False) or sprite.spritecollide(ship, asteroids, False): sprite.spritecollide(ship, monsters, True) sprite.spritecollide(ship, asteroids, True) life = life - 1 if life == 0 or lost >= max_lost: finish = True window.blit(lose, (200,200)) if score >= goal: finish = True window.blit(win, (200,200)) text = font2.render('Сбито:' + str(score),1,(255,255,255)) window.blit(text, (10,20)) text_lose = font2.render('Пропущенно:' + str(lost),1,(255,255,255)) window.blit(text_lose, (10,50)) if life == 3: life_color = (0, 255, 0) if life == 2: life_color = (255, 255, 0) if life == 1: life_color = (255, 0, 0) text_life = font1.render(str(life), 1, life_color) window.blit(text_life, (650,10)) display.update() else: finish = False score = 0 lost = 0 num_fire = 0 life = 3 for b in bullets: b.kill() for m in monsters: m.kill() time.delay(3000) for i in range(1,6): monster = Enemy(img_enemy, randint(80, win_width - 80), -40, 80, 50, randint(2,4)) monsters.add(monster) for i in range(1,3): asteroid = Enemy(img_ast, randint(30, win_width - 30), -40, 80, 50, randint(3,5)) asteroids.add(asteroid) time.delay(50)
python
#!/usr/bin/env python3 # # kcri.bap.shims.cgMLSTFinder - service shim to the cgMLSTFinder backend # import os, json, tempfile, logging from pico.workflow.executor import Task from pico.jobcontrol.job import JobSpec, Job from .base import ServiceExecution, UserException from .versions import BACKEND_VERSIONS # Our service name and current backend version SERVICE, VERSION = "cgMLSTFinder", BACKEND_VERSIONS['cgmlstfinder'] # Backend resource parameters: cpu, memory, disk, run time reqs MAX_CPU = 1 MAX_MEM = 1 MAX_TIM = 10 * 60 class cgMLSTFinderShim: '''Service shim that executes the backend.''' def execute(self, sid, xid, blackboard, scheduler): '''Invoked by the executor. Creates, starts and returns the Task.''' # Check whether running is applicable, else throw to SKIP execution scheme_lst = list(filter(None, blackboard.get_user_input('cq_s','').split(','))) species_lst = blackboard.get_species(list()) if not (scheme_lst or species_lst): raise UserException("no species is known and no cgMLST scheme specified") execution = cgMLSTExecution(SERVICE, VERSION, sid, xid, blackboard, scheduler) # From here run the execution, and FAIL it on exception try: db_dir = execution.get_db_path('cgmlstfinder') db_cfg = os.path.join(db_dir, 'config') # Note we do only one fq fname = execution.get_fastqs_or_contigs_paths([])[0] schemes = self.determine_schemes(db_cfg, scheme_lst, species_lst) execution.start(schemes, fname, db_dir) # Failing inputs will throw UserException except UserException as e: execution.fail(str(e)) # Deeper errors additionally dump stack except Exception as e: logging.exception(e) execution.fail(str(e)) return execution def determine_schemes(self, db_cfg, scheme_lst, species_lst): '''Reads the database config to find out which schemes to run for the given scheme and species lists. 
Returns a list of (scheme,loci) tuples or raises a user interpretable error.''' schemes = list() spc_db = dict() if not os.path.exists(db_cfg): raise UserException("no database config file: %s" % db_cfg) with open(db_cfg, 'r') as f: for l in f: l = l.strip() if not l or l.startswith('#'): continue r = l.split('\t') if not len(r) == 3: continue spc_db[r[1].strip()] = r[0].strip() for db in scheme_lst: if not db in spc_db.values(): raise UserException("unknown scheme: %s; valid schemes are: %s" % (db, ', '.join(spc_db.values()))) elif not db in schemes: schemes.append(db) for s in species_lst: if s.startswith('Shigella'): s = 'Escherichia coli' # argh: should be fixed in config db = spc_db.get(s.split(' ')[0], spc_db.get(s)) if db and not db in schemes: schemes.append(db) if not schemes: raise UserException("no applicable cgMLST scheme") return schemes class cgMLSTExecution(ServiceExecution): '''A single execution of the service, returned by the shim's execute().''' _jobs = list() def start(self, schemes, fname, db_dir): # Schedule a backend job for every scheme if all is good if self.state == Task.State.STARTED: for scheme in schemes: self.run_scheme(scheme, fname, db_dir) def run_scheme(self, scheme, fname, db_dir): '''Spawn cgMLST for one scheme.''' # Create a command line for the job tmpdir = tempfile.TemporaryDirectory() params = [ '-db', db_dir, '-t', tmpdir.name, # '-o', '.', '-s', scheme, fname ] # Spawn the job and hold a record in the jobs table job_spec = JobSpec('cgMLST.py', params, MAX_CPU, MAX_MEM, MAX_TIM) job = self._scheduler.schedule_job('cgmlst_%s' % scheme, job_spec, os.path.join(SERVICE,scheme)) self._jobs.append((job, scheme, tmpdir)) def report(self): '''Implements WorkflowService.Task.report(), update blackboard if we are done and return our current state.''' # If our outward state is STARTED check the jobs if self.state == Task.State.STARTED: # We may be running no jobs at all if no scheme applied if len(self._jobs) == 0: self.add_warning("no cgMLST scheme was found for the species") self.store_results(list()) self.done() # Else we report only once all our jobs are done elif all(j[0].state in [ Job.State.COMPLETED, Job.State.FAILED ] for j in self._jobs): typings = list() for job, scheme, tmpdir in self._jobs: if job.state == Job.State.COMPLETED: typings.append(self.collect_output(job, scheme)) elif job.state == Job.State.FAILED: self.add_error('%s: %s' % (job.name, job.error)) tmpdir.cleanup() # Store result self.store_results(typings) # Report fail if none of the runs succeeded if any(j[0].state == Job.State.COMPLETED for j in self._jobs): self.done() else: self.fail('no successful cgMLSTFinder job') return self.state def collect_output(self, job, scheme): typing = dict({'scheme': scheme }) try: with open(job.file_path('data.json'), 'r') as f: j = json.load(f) d = j.get('cgMLSTFinder').get('results') if d: # There should be at most one, as we have 1 FA or 1 fastq hit = list(d.values())[0] typing.update(hit) self._blackboard.add_cgmlst(scheme, hit.get('cgST', 'NA'), hit.get('perc_allele_matches', 'NA')) except Exception as e: typing['error'] = "cgMLSTFinder ran successfully but output could not be parsed: %s" % str(e) return typing if __name__ == '__main__': main()
python
import time import pytest import examples import progressbar import original_examples def test_examples(monkeypatch): for example in examples.examples: try: example() except ValueError: pass @pytest.mark.filterwarnings('ignore:.*maxval.*:DeprecationWarning') @pytest.mark.parametrize('example', original_examples.examples) def test_original_examples(example, monkeypatch): monkeypatch.setattr(progressbar.ProgressBar, '_MINIMUM_UPDATE_INTERVAL', 1) monkeypatch.setattr(time, 'sleep', lambda t: None) example() @pytest.mark.parametrize('example', examples.examples) def test_examples_nullbar(monkeypatch, example): # Patch progressbar to use null bar instead of regular progress bar monkeypatch.setattr(progressbar, 'ProgressBar', progressbar.NullBar) assert progressbar.ProgressBar._MINIMUM_UPDATE_INTERVAL < 0.0001 example() def test_reuse(): import progressbar bar = progressbar.ProgressBar() bar.start() for i in range(10): bar.update(i) bar.finish() bar.start(init=True) for i in range(10): bar.update(i) bar.finish() bar.start(init=False) for i in range(10): bar.update(i) bar.finish() def test_dirty(): import progressbar bar = progressbar.ProgressBar() bar.start() for i in range(10): bar.update(i) bar.finish(dirty=True)
python
from .default import Config


class DevelopmentConfig(Config):
    """Configurations for Development."""

    DEBUG = True
    TESTING = True
    SECRET = "DevelopSecret123!!"  # pragma: allowlist secret
python
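The settings object above looks like a Flask-style config class; a short sketch of how such a class is typically consumed. The application object and the dotted import path are assumptions for the example.

# Sketch only: assumes the class lives in e.g. config/development.py of a Flask
# project; from_object() copies the upper-case attributes into app.config.
from flask import Flask

app = Flask(__name__)
app.config.from_object('config.development.DevelopmentConfig')  # hypothetical path
assert app.config['DEBUG'] is True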
import numpy as np

NUM_EXP = 1


def evaluate(job_id, params):
    np.random.seed(NUM_EXP)
    x = params['X']
    y = params['Y']
    z = params['Z']
    a = params['A']

    # print('Evaluating at (%f, %f, %f, %f)' % (x, y, z, a))

    obj1 = float(1.10471 * np.power(x,2.0) * z + 0.04811 * a * y * (14.0+z)) + np.random.normal(0,3.2)
    obj2 = float(2.1952 / float((np.power(a,3.0)*y))) + np.random.normal(0,175)

    c1 = (float(13600.0-np.power(np.power(6000.0/(np.power(2,0.5)*x*z),2.0)+ np.power( (6000.0*(14.0+0.5*z)*np.power(0.25*(np.power(z,2.0)+np.power(x+a,2.0)),0.5)/(2*np.power(2.0,0.5)*x*z*(np.power(z,2.0)/(12.0)+0.25*np.power(x+a,2.0)))) ,2.0) + z * 6000.0/(np.power(2,0.5)*x*z) * ((6000.0*(14.0+0.5*z)*np.power(0.25*(np.power(z,2.0)+np.power(x+a,2.0)),0.5)/(2*np.power(2.0,0.5)*x*z*(np.power(z,2.0)/(12.0)+0.25*np.power(x+a,2.0))))) / (np.power(0.25*(np.power(z,2.0)+np.power(x+a,2.0)),0.5)),0.5)) + np.random.normal(0,3)) / 75842.5359709
    c2 = (30000.0-504000/(np.power(a,2.0)*y) + np.random.normal(0,0.5)) / 8526363.04783
    c3 = (y - x + np.random.normal(0,0.05)) / 2.01692584516
    c4 = (64746.022 * (1.0 - 0.0282346 * a) * a *np.power(y, 3.0) - 6000.0 + np.random.normal(0,0.05)) / 11617706.4105

    return {"o1": obj1, "o2": obj2, "c1": c1, "c2": c2, "c3": c3, "c4": c4}


def main(job_id, params):
    try:
        return evaluate(job_id, params)
    except Exception as ex:
        print(ex)
        print('An error occurred in mocotoy_con.py')
        return np.nan


if __name__ == "__main__":
    main(0, {u'X': np.array([ 5.0 ]), u'Y': np.array([ 2.8 ]), u'Z': np.array([ 5.0 ]), u'A': np.array([ 2.8 ])})
python
class Config: HOST_URL = "https://www.mirrativ.com" USER_AGENT = "MR_APP/8.67.0/Android/GA00747-UK/5.1.1" USER_ME = "/api/user/me" PROFILE_EDIT = "/api/user/profile_edit" FOLLOW = "/api/graph/follow" COMMENT = "/api/live/live_comment" LIVE = "/api/live/live" EDIT_LIVE = "/api/live/live_edit" CREATE_LIVE = "/api/live/live_create" STREAM_URL = "/api/live/get_streaming_url" GET_COMMENT = "/api/live/live_comments" LIVE_POLLING = "/api/live/live_polling" LIVE_REQUESTS = "/api/user/post_live_request" EDIT_PROFILE = "/api/user/profile_edit" BUY_AVATAR = "/api/avatar/purchase_avatars" UPDATE_AVATAR = "/api/closet/update_closet_avatar"
python
import os import torch import numpy as np import pickle from utils import * def hook_fn(m, i, o): try: visualisation[m] = o.cpu().numpy() except AttributeError: visualisation[m] = o[0].cpu().numpy() if __name__=='__main__': with open('./results/cka/act_std.pkl', 'rb') as file: act_std = pickle.load(file) file.close() file = open('./results/cka/act_adv.pkl', 'rb') act_adv = pickle.load(file) file.close() file = open('./results/cka/act_bn0.pkl', 'rb') act_bn0 = pickle.load(file) file.close() file = open('./results/cka/act_bn1.pkl', 'rb') act_bn1 = pickle.load(file) file.close() ''' ckas_sa = np.zeros((len(list(act_std.values())), len(list(act_std.values())))) ckas_self = np.zeros((len(list(act_std.values())), len(list(act_std.values())))) ckas_aself = np.zeros((len(list(act_std.values())), len(list(act_std.values())))) ckas_bns = np.zeros((len(list(act_std.values())), len(list(act_std.values())))) ckas_bns_ = np.zeros((len(list(act_std.values())), len(list(act_std.values())))) ckas_bna = np.zeros((len(list(act_std.values())), len(list(act_std.values())))) ckas_bna_ = np.zeros((len(list(act_std.values())), len(list(act_std.values())))) ckas_bnsa = np.zeros((len(list(act_std.values())), len(list(act_std.values())))) ''' ckas_bn1 = np.zeros((len(list(act_std.values())), len(list(act_std.values())))) ckas_bn0 = np.zeros((len(list(act_std.values())), len(list(act_std.values())))) assert len(list(act_std.values())) == len(list(act_bn0.values())) ckas = [] for i in range(len(list(act_std.values()))): for j in range(len(list(act_std.values()))): # X_s = list(act_std.values())[i].reshape(196, -1) # X_a_ = list(act_adv.values())[i].reshape(196, -1) # X_s_ = list(act_std.values())[j].reshape(196, -1) # X_a = list(act_adv.values())[j].reshape(196, -1) # try: X_bn0_ = list(act_bn0.values())[i].reshape(196, -1) X_bn0 = list(act_bn0.values())[j].reshape(196, -1) X_bn1_ = list(act_bn1.values())[i].reshape(196, -1) X_bn1 = list(act_bn1.values())[j].reshape(196, -1) # except AttributeError: # X_bn0_ = list(act_bn0.values())[i][0].reshape(196, -1) # X_bn0 = list(act_bn0.values())[j][0].reshape(196, -1) # X_bn1 = list(act_bn1.values())[j][0].reshape(196, -1) ''' ckas_sa[i][j] = cka(gram_linear(X_s), gram_linear(X_a), debiased=True) ckas_self[i][j] = cka(gram_linear(X_s), gram_linear(X_s_), debiased=True) ckas_aself[i][j] = cka(gram_linear(X_a_), gram_linear(X_a), debiased=True) ckas_bns[i][j] = cka(gram_linear(X_s), gram_linear(X_bn0), debiased=True) ckas_bns_[i][j] = cka(gram_linear(X_s), gram_linear(X_bn1), debiased=True) ckas_bnsa[i][j] = cka(gram_linear(X_bn0_), gram_linear(X_bn1), debiased=True) ckas_bna[i][j] = cka(gram_linear(X_a_), gram_linear(X_bn0), debiased=True) ckas_bna_[i][j] = cka(gram_linear(X_a_), gram_linear(X_bn1), debiased=True) ''' ckas_bn0[i][j] = cka(gram_linear(X_bn0_), gram_linear(X_bn0), debiased=True) ckas_bn1[i][j] = cka(gram_linear(X_bn1_), gram_linear(X_bn1), debiased=True) # ckas.append(ckas_sa) # ckas.append(ckas_self) # ckas.append(ckas_aself) # ckas.append(ckas_bns) # ckas.append(ckas_bns_) # ckas.append(ckas_bna) # ckas.append(ckas_bna_) # ckas.append(ckas_bnsa) ckas.append(ckas_bn0) ckas.append(ckas_bn1) np.save('./results/ckas_.npy', np.array(ckas))
python
# https://leetcode.com/problems/subsets-ii/description/
#
# algorithms
# Medium (40.24%)
# Total Accepted:    173.2K
# Total Submissions: 430.4K
# beats 100.0% of python submissions


class Solution(object):
    def subsetsWithDup(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        length = len(nums)
        res = set()

        def recursive(idx, path):
            res.add(tuple(sorted(path)))
            if idx == length:
                return
            for i in range(idx, length):
                recursive(i + 1, path + [nums[i]])

        recursive(0, [])
        return [list(path) for path in res]
python
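A quick check of the solution above; the result is sorted only for a stable printout, since the set makes the natural order arbitrary.

# Quick sanity check of subsetsWithDup.
sol = Solution()
print(sorted(sol.subsetsWithDup([1, 2, 2])))
# -> [[], [1], [1, 2], [1, 2, 2], [2], [2, 2]]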
""" retrieve environment variables and resolve references to AWS Parameter Store Parameters. """ from typing import Dict import os import boto3 def get(name: str, session: boto3.session.Session) -> str: """ gets the environment variable value specified by `name`. if the `value` starts with ssm://, it will return the value of the SSM parameter with the specified name. The resulting value is cached, so subsequent requests will return the same value. """ if name in _cache: return _cache[name] value = os.getenv(name) if value and value.startswith("ssm://"): response = session.client("ssm").get_parameter( Name=value[6:], WithDecryption=True ) value = response["Parameter"]["Value"] _cache[name] = value return value # cache of retrieved environment variables _cache: Dict[str, str] = {}
python
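A short usage sketch for the resolver above. The module name env, the environment variable, the SSM parameter path and the region are all assumptions for the example.

# Sketch: assumes AWS credentials are configured and that an SSM parameter
# named /myapp/database-url exists (both hypothetical).
import os
import boto3
import env  # assuming the module above is saved as env.py

os.environ["DATABASE_URL"] = "ssm:///myapp/database-url"

session = boto3.session.Session(region_name="eu-west-1")
print(env.get("DATABASE_URL", session))   # resolved via SSM and cached
print(env.get("DATABASE_URL", session))   # served from _cache, no second SSM call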
""" A repository of typed entities, retrievable by their external reference Entity object API: entity.entity_type --> string used for groupung entity.external_ref --> lookup name entity.origin --> one-time settable parameter, set by the entity store entity.validate() --> must return True [for valid entities and False for invalid ones] entity.name --> printable name Optional: entity.uuid --? used for entity retrieval """ from __future__ import print_function, unicode_literals import uuid import re import os from datetime import datetime from collections import defaultdict from antelope import local_ref from ..from_json import to_json # CatalogRef = namedtuple('CatalogRef', ['archive', 'id']) ref_regex = re.compile('[a-z0-9_]+(\.[a-z0-9_]+)*', flags=re.IGNORECASE) uuid_regex = re.compile('([0-9a-f]{8}-?([0-9a-f]{4}-?){3}[0-9a-f]{12})', flags=re.IGNORECASE) def to_uuid(_in): if _in is None: return _in if isinstance(_in, int): return None try: g = uuid_regex.search(_in) # using the regexp test is 50% faster than asking the UUID library except TypeError: if isinstance(_in, uuid.UUID): return str(_in) g = None if g is not None: return g.groups()[0] ''' # no regex match- let's see if uuid.UUID can handle the input try: _out = uuid.UUID(_in) except ValueError: return None return str(_out) ''' ## NOTE: This is costly because it requires to instantiate a UUID for EVERY query, especially those that are # already probably not valid UUIDs! There is every reason to expect the input is a string, and our regex already # matches even non-RFC-compliant UUID strings. I'm going to leave it out for now return None class SourceAlreadyKnown(Exception): pass class EntityExists(Exception): pass class InvalidSemanticReference(Exception): pass class ReferenceCreationError(Exception): pass class EntityStore(object): _entity_types = () # must be overridden ''' _ns_uuid_required: specifies whether the archive must be supplied an ns_uuid (generally, archives that are expected to generate persistent, deterministic IDs must have an externally specified ns_uuid) If False: random ns_uuid generated if none is supplied If True: ns_uuid must be supplied as an argument, will raise exception if missing If None: ns_uuid forced to None - store does not have ns_uuid capabilities ''' _ns_uuid_required = False _origin = None # can be set when a catalog is assigning a ref def _ref_to_uuid(self, key): """ This tries to find a UUID from a ref. Not sure what this is good for. by default, to_uuid just returns a string matching the regex, or failing that, tries to generate a string using uuid.UUID(key) :param key: :return: """ u = to_uuid(key) # check if key is already a uuid if u is None: return self._ref_to_nsuuid(key) return u def _ref_to_nsuuid(self, key): if self._ns_uuid is None: return None if isinstance(key, int): key = str(key) return str(uuid.uuid3(self._ns_uuid, key)) def _ref_to_key(self, key): """ This method always returns a valid key into _entities, or None. May be overridden. :param key: :return: """ if key in self._entities: return key uu = self._ref_to_uuid(key) if uu is not None: if uu in self._entities: return uu def get_uuid(self, key): """ Deprecated. 
:param key: :return: """ return self._ref_to_uuid(key) def _set_ns_uuid(self, ns_uuid): print('%s: Setting NSUUID (%s) %s' % (self.ref, self._ns_uuid_required, ns_uuid)) if self._ns_uuid_required is None: if ns_uuid is not None: print('Ignoring ns_uuid specification') return None else: if ns_uuid is None: if self._ns_uuid_required is True: raise AttributeError('ns_uuid specification required') elif self._ns_uuid_required is False: return uuid.uuid4() else: if isinstance(ns_uuid, uuid.UUID): return ns_uuid return uuid.UUID(ns_uuid) def __init__(self, source, ref=None, quiet=True, upstream=None, static=False, dataReference=None, ns_uuid=None, no_validate=None, **kwargs): """ An EntityStore is a provenance structure for a collection of entities. Ostensibly, an EntityStore has a single source from which entities are collected. The source is a resolvable URI that indicates a data resource from which data describing the entities can be extracted. The exact manner of extracting data from resources is subclass-dependent. Internally, all entities are stored with UUID keys. If the external references do not contain UUIDs, it is recommended to derive a UUID3 using an archive-specific, stable namespace ID. The class-level _ns_uuid_required attribute governs this option: - if True, an ns_uuid argument must be provided when the class is instantiated. This is consistent with a use case in which it is desirable to have predictable, fixed UUIDs (i.e. to interface with a data system that requires stable UUIDs) - if False, a random ns_uuid is generated, and used to create a UUID anytime an entity is given a non-UUID external_ref - if None, UUID3 are not used and any supplied ns_uuid argument is ignored. external_refs must always be UUIDs. There is still some refactoring to be done, to try to eliminate the need for externally visible UUIDs anywhere. An archive has a single semantic reference that describes the data context from which its native entities were gathered. The reference is given using dot-separated hierarchical terms in order of decreasing semantic significance from left to right. The leftmost specifier should describe the maintainer of the resource (which defaults to 'local' when a reference argument is not provided), followed by arbitrarily more precise specifications. Some examples are: local.lcia.traci.2.1.spreadsheet ecoinvent.3.2.undefined The purpose for the source / reference distinction is that in principle many different sources can all provide the same semantic content: for instance, ecoinvent can be accessed from the website or from a file on the user's computer. In principle, if the semantic reference for two archives is the same, the archives should contain excerpts of the same data, even if drawn from different sources. An entity is uniquely identified by its link property, which is made from concatenating the semantic origin and a stable reference known as an 'external_ref', as 'origin/external_ref'. The first slash is the delimiter between origin and reference. Examples: elcd.3.2/processes/00043bd2-4563-4d73-8df8-b84b5d8902fc uslci.ecospold/Acetic acid, at plant Note that the inclusion of embedded whitespace, commas, and other characters indicate that these semantic references are not proper URIs. It is hoped that the user community will help develop and maintain a consistent and easily interpreted namespace for semantic references. If this is done, it should be possible to identify any published entity with a concise reference. 
When an entity is first added to an archive, it is assigned that archive's *reference* as its origin, following the expectation that data about the same reference from different sources is the same data. When an entity with a different origin is added to an archive, it is good practice to add a mapping from that origin to its source in the receiving archive's "catalog_names" dictionary. However, since the entity itself does not know its archive's source, this cannot be done automatically. :param source: physical data source-- where the information is being drawn from :param ref: optional semantic reference for the data source. gets added to catalog_names. :param quiet: :param upstream: :param static: [False] whether archive is expected to be unchanging. :param dataReference: alternative to ref :param ns_uuid: required to store entities by common name. Used to generate uuid3 from string inputs. :param no_validate: if True, skip validation on entity add :param kwargs: any other information that should be serialized with the archive """ self._source = source if ref is None: if dataReference is None: ref = local_ref(source) else: ref = dataReference self._entities = {} # uuid-indexed list of known entities self._quiet = quiet # whether to print out a message every time a new entity is added / deleted / modified self._serialize_dict = kwargs # this gets added to self._counter = defaultdict(int) self._ents_by_type = defaultdict(set) self._upstream = None self._no_validate = no_validate self._loaded = False self._static = static self._descendant = False if upstream is not None: self.set_upstream(upstream) self._catalog_names = defaultdict(set) # this is a place to map semantic references to data sources self._add_name(ref, source) self._serialize_dict['dataReference'] = ref self._ns_uuid = self._set_ns_uuid(ns_uuid) if self._ns_uuid is not None: self._serialize_dict['ns_uuid'] = str(self._ns_uuid) def _add_name(self, ref, source, rewrite=False): """ A source is not allowed to provide multiple semantic references a ref must match the regexp ([A-Za-z0-9_]+(\.[A-Za-z0-9_])*) :param ref: :param source: :param rewrite: [False] if True, if SourceAlreadyKnown, re-assign the source to the new ref. This may result in the archive's ref changing, and should only be used when an authoritative source-ref pair is supplied (e.g. a JSON file that was loaded into the archive) :return: """ if not ref_regex.match(ref): raise InvalidSemanticReference('%s' % ref) for k, s in self._catalog_names.items(): if source in s and source is not None: if source == self.source and k == local_ref(self.source): '''if we're trying to add our own source and ref to the name index, and the source is currently registered to the default local_ref, then we override it ''' self._catalog_names[ref] = self._catalog_names.pop(k) return if k == ref or ref.startswith(k): return if rewrite: self._catalog_names[k].remove(source) print('%s: <source removed>' % k) else: raise SourceAlreadyKnown('Source %s already registered to name %s (vs: %s)' % (source, k, ref)) print('%s: %s' % (ref, source)) self._catalog_names[ref].add(source) if ref == self.ref and self.source is None and rewrite: self._source = source def add_new_source(self, new_ref, new_source): self._add_name(new_ref, new_source, rewrite=False) @property def source(self): """ The catalog's original source is the "master descriptor" of the catalog's content. This is required for subclass methods to work properly, in the event that the original source is called upon. 
:return: """ return self._source def _set_source(self, new_ref, new_source): self._source = new_source self._add_name(new_ref, new_source) self._descendant = True def set_origin(self, origin): self._serialize_dict['dataReference'] = origin self._add_name(origin, self.source, rewrite=True) self._origin = origin @property def ref(self): if self._origin is not None: return self._origin try: return next(k for k, s in self._catalog_names.items() if self.source in s) except StopIteration: return local_ref(self.source) @property def catalog_names(self): for k in self._catalog_names.keys(): yield k @property def names(self): """ Return a mapping of data source to semantic reference, based on the catalog_names property. This is used by a catalog interface to convert entity origins from physical to semantic. If a single data source has multiple semantic references, only the most-downstream one will be kept. If there are multiple semantic references for the same data source in the same archive, one will be kept at random. This should be avoided and I should probably test for it when setting catalog_names. :return: """ if self._upstream is None: names = dict() else: names = self._upstream.names for k, s in self._catalog_names.items(): for v in s: names[v] = k return names def get_sources(self, name): s = self._catalog_names[name] if len(s) == 0: for k, ss in self._catalog_names.items(): if k.startswith(name): s = s.union(ss) for d in s: yield d def construct_new_ref(self, signifier): today = datetime.now().strftime('%Y%m%d') if signifier is None: new_tail = today else: if not bool(re.match('[A-Za-z0-9_-]+', signifier)): raise ValueError('Invalid signifier %s' % signifier) new_tail = '.'.join([signifier, datetime.now().strftime('%Y%m%d')]) if len(self.ref.split('.')) > 2: # must be true to be postfixing a postfix old_tail = '.'.join(self.ref.split('.')[-2:]) if old_tail.startswith(new_tail): hm = '.' + datetime.now().strftime('-%H%M') if old_tail.startswith(new_tail + hm): hm += datetime.now().strftime('%S') if old_tail.startswith(new_tail + hm): raise ReferenceCreationError('HMS? %s', (self.ref, hm)) new_tail += hm elif old_tail.find('.' + today) >= 0 and signifier is not None: # don't reprint the date if it already shows up new_tail = signifier new_ref = '.'.join([self.ref, new_tail]) return new_ref def create_descendant(self, archive_path, signifier=None, force=False): """ Saves the archive to a new source with a new semantic reference. The new semantic ref is derived by (a) first removing any trailing ref that matches [0-9]{8+} (b) appending the descendant signifier (c) appending the current date in YYYYMMDD format After that: 1. The new semantic ref is added to catalog_names, 2. the source is set to archive_path/semantic.ref.json.gz, 3. load_all() is executed, 4. the archive is saved to the new source. :param archive_path: where to store the archive :param signifier: A nonzero-length string matching [A-Za-z0-9_-]+. If not supplied, then the semantic ref is unchanged except for the date tag. :param force: overwrite if file exists :return: new semantic ref. 
""" if not os.path.exists(archive_path): os.makedirs(archive_path) new_ref = self.construct_new_ref(signifier) if new_ref == self.ref: raise KeyError('Refs are the same!') # KeyError bc it's a key in catalog_names new_filename = new_ref + '.json.gz' new_source = os.path.join(archive_path, new_filename) if os.path.exists(new_source): if force: print('Overwriting existing archive') else: raise EnvironmentError('File %s exists: force=True to overwrite' % new_source) try: self.load_all() except NotImplementedError: pass self._set_source(new_ref, new_source) self.write_to_file(new_source, gzip=True, complete=True) return new_ref @property def static(self): return self._static or self._loaded ''' @property def ref(self): """ Deprecated. Archives have a source; catalogs have a ref. :return: """ return self._source ''' def entities(self): for v in self._entities.values(): yield v def set_upstream(self, upstream): assert isinstance(upstream, EntityStore) if upstream.source != self.source: self._serialize_dict['upstreamReference'] = upstream.ref self._upstream = upstream ''' def truncate_upstream(self): """ BROKEN! / deprecated removes upstream reference and rewrites entity uuids to match current index. note: deprecates the upstream upstream_ :return: """ # TODO: this needs to be fixed: truncate needs localize all upstream entities (retaining their origins) for k, e in self._entities.items(): e._uuid = k self._upstream = None if 'upstreamReference' in self._serialize_dict: self._serialize_dict.pop('upstreamReference') ''' def _print(self, *args): if self._quiet is False: print(*args) def __str__(self): count = sum(len(v) for v in self._ents_by_type.values()) s = '%s with %d entities at %s' % (self.__class__.__name__, count, self.source) if self._upstream is not None: s += ' [upstream %s]' % self._upstream.__class__.__name__ return s def _get_entity(self, key): """ the fundamental method- retrieve an entity from LOCAL collection by key, nominally a UUID string. If the string is not found, raises KeyError. :param key: a uuid :return: the LcEntity or None """ if key in self._entities: return self._entities[key] raise KeyError(key) def __contains__(self, item): return item in self._entities def __getitem__(self, item): """ CLient-facing entity retrieval. item is a key that can be converted to a valid UUID from self._ref_to_key()-- either a literal UUID, or a string containing something matching a naive UUID regex. First checks upstream, then local. Returns None if nothing is found :param item: :return: """ if item is None: return None if self._upstream is not None: e = self._upstream[item] if e is not None: return e try: if isinstance(item, int) and self._ns_uuid is not None: return self._get_entity(self._ref_to_nsuuid(item)) return self._get_entity(self._ref_to_key(item)) except KeyError: return None def _ensure_valid_refs(self, entity): """ Hook to validate the incoming entity's references-- namely, to set its uuid :param entity: :return: """ if hasattr(entity, 'uuid') and entity.uuid is None: uu = self._ref_to_uuid(entity.external_ref) if uu is not None: entity.uuid = uu def _add(self, entity, key, quiet=False): self._ensure_valid_refs(entity) if key is None: raise ValueError('Key not allowed to be None') if key in self._entities: raise EntityExists('Entity already exists: %s' % key) if entity.entity_type not in self._entity_types: raise TypeError('Entity type %s not valid!' 
% entity.entity_type) if entity.is_entity and not self._no_validate: if not entity.validate(): raise ValueError('Entity fails validation: %s' % repr(entity)) if not (self._quiet or quiet): print('Adding %s entity with %s: %s' % (entity.entity_type, key, entity['Name'])) if entity.origin is None: # TODO: uncomment / enforce this # assert self._ref_to_key(entity.external_ref) == key, 'entity uuid must match origin repository key!' entity.origin = self.ref self._entities[key] = entity if self._ns_uuid is not None: # ensure UUID3s work even if custom UUIDs are specified nsuuid = self._ref_to_uuid(entity.external_ref) if nsuuid is not None and nsuuid not in self._entities: self._entities[nsuuid] = entity self._counter[entity.entity_type] += 1 self._ents_by_type[entity.entity_type].add(key) # it's not ok to change an entity's type def check_counter(self, entity_type=None): if entity_type is None: [self.check_counter(entity_type=k) for k in self._entity_types] else: print('%d new %s entities added (%d total)' % (self._counter[entity_type], entity_type, self.count_by_type(entity_type))) self._counter[entity_type] = 0 def find_partial_id(self, uid, upstream=False, startswith=True): """ :param uid: is a fragmentary (or complete) uuid string. :param upstream: [False] whether to look upstream if it exists :param startswith: [True] use .startswith instead of full regex :return: result set """ if startswith: def test(x, y): return y.startswith(x) else: def test(x, y): return bool(re.search(x, y)) result_set = [v for k, v in self._entities.items() if test(uid, k)] if upstream and self._upstream is not None: result_set += self._upstream.find_partial_id(uid, upstream=upstream, startswith=startswith) return result_set def _fetch(self, entity, **kwargs): """ Dummy function to fetch from archive. MUST be overridden. Can't fetch from upstream. :param entity: :return: """ raise NotImplementedError def retrieve_or_fetch_entity(self, key, **kwargs): """ Client-facing function to retrieve entity by ID, first checking in the archive, then from the source. Input is flexible-- could be a UUID or key (partial uuid is just not useful) :param key: the identifying string (uuid or external ref) :param kwargs: used to pass provider-specific information :return: """ if key is not None: entity = self.__getitem__(key) # this checks upstream if it exists if entity is not None: # retrieve return entity # fetch return self._fetch(key, **kwargs) def get(self, key): return self.retrieve_or_fetch_entity(key) def validate_entity_list(self): """ This whole thing is crufty and untested and never used and should be abandoned :return: """ count = 0 for k, v in self._entities.items(): valid = True ''' # 1: confirm key is a UUID if not isinstance(k, uuid.UUID): print('Key %s is not a valid UUID.' % k) valid = False ''' if v.origin is None: print("%s: No origin!" % k) valid = False if v.origin == self.source: # 2: confirm entity's external key maps to its uuid if self._ref_to_uuid(v.external_ref) != v.uuid: print("%s: Key doesn't match UUID in origin!" 
% v.external_ref) valid = False # confirm entity is dict-like with keys() and with a set of common keys try: valid = valid & v.validate() except AttributeError: print('Key %s: not a valid LcEntity (no validate() method)' % k) valid = False if valid: count += 1 print('%d entities validated out of %d' % (count, len(self._entities))) return count def _load_all(self, **kwargs): """ Must be overridden in subclass :return: """ raise NotImplementedError def load_all(self, **kwargs): if self._loaded is False: print('Loading %s' % self.source) self._load_all(**kwargs) self._loaded = True def entities_by_type(self, entity_type): for u in sorted(self._ents_by_type[entity_type]): yield self._entities[u] def count_by_type(self, entity_type): return len(self._ents_by_type[entity_type]) @property def init_args(self): return self._serialize_dict def serialize(self, **kwargs): j = { 'dataSourceType': self.__class__.__name__, 'dataSource': self.source, 'catalogNames': {k: sorted(filter(None, s)) for k, s in self._catalog_names.items()}, 'initArgs': self._serialize_dict } return j def _serialize_all(self, **kwargs): """ To be overridden-- specify args necessary to make a complete copy :param kwargs: :return: """ return self.serialize(**kwargs) def write_to_file(self, filename, gzip=False, complete=False, **kwargs): """ :param filename: :param gzip: :param complete: :param kwargs: whatever is required by the subclass's serialize method :return: """ if self._source is None: self._set_source(self.ref, filename) # unless there was no source to begin with elif filename not in self.names: self._add_name(self.ref, filename) if complete: s = self._serialize_all(**kwargs) if self._loaded: s['loaded'] = True else: s = self.serialize(**kwargs) to_json(s, filename, gzip=gzip)
python
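The store above is abstract (its _fetch and _load_all must come from a subclass) and it expects entities exposing the small API listed in the module docstring. Below is a minimal, illustrative subclass with a dummy entity; the import path, class names, source path and ref are invented for the sketch, it calls the internal _add only for demonstration, and it still needs the module's own dependencies (e.g. antelope) to be importable.

# Illustrative only: names and paths below are assumptions, not the real package API.
from entity_store import EntityStore  # hypothetical module name for the code above


class DemoEntity(object):
    """Bare-minimum object satisfying the entity API the store expects."""
    entity_type = 'widget'
    is_entity = True

    def __init__(self, external_ref, name):
        self.external_ref = external_ref
        self.origin = None            # set by the store on add
        self._props = {'Name': name}

    def validate(self):
        return True

    def __getitem__(self, key):       # the store prints entity['Name'] when not quiet
        return self._props[key]


class DemoStore(EntityStore):
    _entity_types = ('widget',)

    def _fetch(self, entity, **kwargs):
        return None                   # nothing to fetch from the (hypothetical) source

    def _load_all(self, **kwargs):
        pass                          # nothing to load


store = DemoStore('/tmp/demo-archive.json', ref='local.demo')
w = DemoEntity('widget-1', 'A demo widget')
store._add(w, w.external_ref)         # internal API, used here just for illustration

assert store['widget-1'] is w
assert w.origin == 'local.demo'       # origin assigned from the store's ref
print(store.names)                    # {'/tmp/demo-archive.json': 'local.demo'}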
import bpy from bpy.props import * from ..node_socket import RenderNodeSocket, SocketBase, RenderNodeSocketmixin, RenderNodeSocketInterface from ..node_socket import update_node class RenderNodeSocketInterfaceRenderList(RenderNodeSocketmixin, RenderNodeSocketInterface, bpy.types.NodeSocketInterface): bl_idname = 'RSNodeSocketRenderList' bl_socket_idname = 'RSNodeSocketRenderList' bl_label = 'RenderList (RenderNode)' shape = 'DIAMOND' default_value = None def init_from_socket(self, node, socket): self.display_shape = self.shape def draw(self, context, layout): pass def draw_color(self, context): return 0.95, 0.95, 0.95, 1.0 class RSNodeSocketRenderList(bpy.types.NodeSocket, SocketBase): bl_idname = 'RSNodeSocketRenderList' bl_label = 'RSNodeSocketRenderList' compatible_sockets = ['RenderNodeMerge','RSNodeSocketMergeSettings'] shape = 'DIAMOND' default_value = None def draw(self, context, layout, node, text): layout.label(text=text) def draw_color(self, context, node): return 0.95, 0.95, 0.95, 1.0 def change_shape(self): self.display_shape = self.shape classes = ( RenderNodeSocketInterfaceRenderList, RSNodeSocketRenderList, ) def register(): for cls in classes: bpy.utils.register_class(cls) def unregister(): for cls in classes: bpy.utils.unregister_class(cls)
python
import ocaml

assert(ocaml.Result.get_ok(ocaml.Result.Ok(True)) == True)
python
from opera.parser.yaml.node import Node from ..entity import Entity from ..path import Path from ..string import String class ImportDefinition(Entity): ATTRS = dict( file=Path, repository=String, namespace_prefix=String, namespace_uri=String, ) DEPRECATED = { "namespace_uri", } @classmethod def normalize(cls, yaml_node): if not isinstance(yaml_node.value, (str, dict)): cls.abort( "Invalid import data. Expected string or dict.", yaml_node.loc, ) if isinstance(yaml_node.value, str): return Node({Node("file"): yaml_node}) return yaml_node
python
# online = mongodb_online()
# print('mongodb-online: ', online)
# TODO: cron docker for mongo
python
# -*- coding: utf-8 -*- import json from django.db.models import Q from rest_framework.exceptions import PermissionDenied from rest_framework.generics import ( CreateAPIView, ListCreateAPIView, RetrieveAPIView, RetrieveUpdateDestroyAPIView ) from rest_framework.permissions import ( AllowAny, IsAdminUser, IsAuthenticated ) from rest_framework.response import Response from note.authentication import AuthorAndAllAdmins, IsAuthenticatedOrReadOnly from note.controller import ( delete_user, get_all_users, get_user_name_by_id, update_user ) from note.models import User, Note from note.serializers import UserSerializer, NoteSerializer from note.utils import sanitize_json_input class RegisterView(CreateAPIView): queryset = User.objects.all() permission_classes = (AllowAny,) serializer_class = UserSerializer class UsersAPIView(RetrieveAPIView): permission_classes = (IsAdminUser, ) serializer_class = UserSerializer def get(self, request): users = get_all_users() return Response(users) class UserAPIView(RetrieveUpdateDestroyAPIView): permission_classes = (IsAuthenticated, AuthorAndAllAdmins) serializer_class = UserSerializer def get_object(self): return self.request.user def get(self, request, user_id): user_name = get_user_name_by_id(user_id) content = {'user is': user_name} return Response(content) @sanitize_json_input def put(self, request, *args, **kwargs): data = json.loads(self.request.body) uuid = kwargs.get('user_id') user_name = update_user(request, data, uuid) content = {'user {} has been updated'.format(self.request.user.name): user_name} return Response(content) def delete(self, request, *args, **kwargs): user_name = get_user_name_by_id(kwargs.get('user_id')) delete_user(kwargs.get('user_id')) content = 'User {} has been deleted'.format(user_name) return Response(content) class NotesView(ListCreateAPIView): permission_classes = (IsAuthenticatedOrReadOnly, ) serializer_class = NoteSerializer def get_queryset(self): visibility = self.request.user.is_authenticated tags = dict(self.request.query_params).get('tag') keyword = self.request.query_params.get('keyword') filter = Q(user_id=self.request.user.id) if visibility else Q(is_private=visibility) if tags: filter &= Q(tags__title__in=tags) if keyword: filter &= Q(title__icontains=keyword) | Q(body__icontains=keyword) | Q(tags__title__icontains=keyword) notes_obj = Note.objects.filter(filter).distinct() return notes_obj class NoteView(RetrieveUpdateDestroyAPIView): permission_classes = (IsAuthenticatedOrReadOnly, ) serializer_class = NoteSerializer def get_object(self): notes_obj = Note.objects.get(id=self.kwargs.get('id')) notes_obj = notes_obj if notes_obj.user_id == self.request.user or notes_obj.is_private == False else None return notes_obj @sanitize_json_input def put(self, request, *args, **kwargs): notes_obj = Note.objects.get(id=self.kwargs.get('id')) if notes_obj.user_id == self.request.user: return self.update(request, *args, **kwargs) else: raise PermissionDenied def delete(self, request, *args, **kwargs): notes_obj = Note.objects.get(id=self.kwargs.get('id')) if notes_obj.user_id == self.request.user: return self.destroy(request, *args, **kwargs) else: raise PermissionDenied
python
from django.contrib import admin # Register your models here. from reg.models import UserProfile from .models import * class BookAuthorAdmin(admin.ModelAdmin): list_display = ('author_last_name', 'author_first_name', 'author_middle_name') search_fields = ('author_last_name', 'author_first_name', 'author_middle_name') list_filter = ('author_last_name',) ordering = ('-author_last_name',) class LibraryBookAdmin(admin.ModelAdmin): list_display = ('book_title', 'book_author_id', 'category','quantity', 'number_borrowed') search_fields = ('book_title',) fields = ('book_title', 'book_author_id', 'category') class SingleBookAdmin(admin.ModelAdmin): list_display = ('book_id', 'serial_number') def save_model(self, request, obj, form, change): admin.ModelAdmin.save_model(self, request, obj, form, change) if not change: obj.book_id.quantity += 1 if not obj.is_available_returned: obj.book_id.number_borrowed += 1 if obj.is_available_returned and obj.book_id.number_borrowed > 0: obj.book_id.number_borrowed -= 1 obj.book_id.save() admin.site.register(UserProfile) admin.site.register(LibraryBook, LibraryBookAdmin) admin.site.register(SingleBook, SingleBookAdmin) admin.site.register(BookAuthors, BookAuthorAdmin) admin.site.register(BorrowingLog) admin.site.register(BookCategory)
python
import numpy as np


class ArgMaxPolicy(object):

    def __init__(self, critic):
        self.critic = critic

    def get_action(self, obs):
        if len(obs.shape) > 3:
            observation = obs
        else:
            observation = obs[None]

        ## TODO return the action that maximizes the Q-value
        # at the current observation as the output
        # argmax(-1) returns the index of the last dimension (action, in this case)
        actions = self.critic.qa_values(observation).argmax(-1)

        return actions.squeeze()
python
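To make the behaviour of the policy above concrete, a small sketch with a stand-in critic, reusing the ArgMaxPolicy class defined above. The FakeCritic, the action count and the observation shape are invented for the example; the real critic is whatever DQN-style object gets passed in.

import numpy as np

class FakeCritic:
    """Stand-in critic: qa_values returns one Q-value per action."""
    def qa_values(self, obs):
        # pretend there are 4 discrete actions; Q is largest for action 3
        batch = obs.shape[0]
        return np.tile(np.array([0.1, 0.5, 0.2, 0.9]), (batch, 1))

policy = ArgMaxPolicy(FakeCritic())
single_obs = np.zeros(3)               # 1-D observation gets batched via obs[None]
print(policy.get_action(single_obs))   # -> 3, the index of the largest Q-value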
# coding: utf-8 """ DocuSign REST API The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. OpenAPI spec version: v2 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class MergeField(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, allow_sender_to_edit=None, configuration_type=None, path=None, row=None, write_back=None): """ MergeField - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'allow_sender_to_edit': 'str', 'configuration_type': 'str', 'path': 'str', 'row': 'str', 'write_back': 'str' } self.attribute_map = { 'allow_sender_to_edit': 'allowSenderToEdit', 'configuration_type': 'configurationType', 'path': 'path', 'row': 'row', 'write_back': 'writeBack' } self._allow_sender_to_edit = allow_sender_to_edit self._configuration_type = configuration_type self._path = path self._row = row self._write_back = write_back @property def allow_sender_to_edit(self): """ Gets the allow_sender_to_edit of this MergeField. When set to **true**, the sender can modify the value of the custom tab during the sending process. :return: The allow_sender_to_edit of this MergeField. :rtype: str """ return self._allow_sender_to_edit @allow_sender_to_edit.setter def allow_sender_to_edit(self, allow_sender_to_edit): """ Sets the allow_sender_to_edit of this MergeField. When set to **true**, the sender can modify the value of the custom tab during the sending process. :param allow_sender_to_edit: The allow_sender_to_edit of this MergeField. :type: str """ self._allow_sender_to_edit = allow_sender_to_edit @property def configuration_type(self): """ Gets the configuration_type of this MergeField. If merge field's are being used, specifies the type of the merge field. The only supported value is **salesforce**. :return: The configuration_type of this MergeField. :rtype: str """ return self._configuration_type @configuration_type.setter def configuration_type(self, configuration_type): """ Sets the configuration_type of this MergeField. If merge field's are being used, specifies the type of the merge field. The only supported value is **salesforce**. :param configuration_type: The configuration_type of this MergeField. :type: str """ self._configuration_type = configuration_type @property def path(self): """ Gets the path of this MergeField. Sets the object associated with the custom tab. Currently this is the Salesforce Object. :return: The path of this MergeField. :rtype: str """ return self._path @path.setter def path(self, path): """ Sets the path of this MergeField. Sets the object associated with the custom tab. Currently this is the Salesforce Object. :param path: The path of this MergeField. :type: str """ self._path = path @property def row(self): """ Gets the row of this MergeField. Specifies the row number in a Salesforce table that the merge field value corresponds to. :return: The row of this MergeField. :rtype: str """ return self._row @row.setter def row(self, row): """ Sets the row of this MergeField. Specifies the row number in a Salesforce table that the merge field value corresponds to. :param row: The row of this MergeField. 
        :type: str
        """
        self._row = row

    @property
    def write_back(self):
        """
        Gets the write_back of this MergeField.
        When set to true, the information entered in the tab automatically updates the related Salesforce data when an envelope is completed.

        :return: The write_back of this MergeField.
        :rtype: str
        """
        return self._write_back

    @write_back.setter
    def write_back(self, write_back):
        """
        Sets the write_back of this MergeField.
        When set to true, the information entered in the tab automatically updates the related Salesforce data when an envelope is completed.

        :param write_back: The write_back of this MergeField.
        :type: str
        """
        self._write_back = write_back

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
python
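Since the class above is a plain generated data model, a short round-trip sketch; the field values are made up for the example.

# Round-trip sketch with made-up values; to_dict() walks swagger_types.
mf = MergeField(allow_sender_to_edit='true',
                configuration_type='salesforce',
                path='Opportunity.Name',
                row='1',
                write_back='true')

print(mf.to_dict())
# {'allow_sender_to_edit': 'true', 'configuration_type': 'salesforce',
#  'path': 'Opportunity.Name', 'row': '1', 'write_back': 'true'}

copy = MergeField(**mf.to_dict())
assert copy == mf                      # __eq__ compares the underlying __dict__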
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import json
import argparse
from modules.FIC_Core import FICCore
from modules.config import DEFAULT_DAYS, REPOSITORIES_FILE


class FICMainMenu(FICCore):
    def __init__(self):
        FICCore.__init__(self)
        self.all = False
        self.git_only = False
        self.hg_only = False
        self.repo_selection = False
        self.logging = False
        self.days = DEFAULT_DAYS
        self.push = False
        self.dev = False
        self.skip_menu = False
        self.parser = argparse.ArgumentParser()
        self.arguments_set = False  # Check to see if we set the Flag values or not. Helps to skip un-needed iterations.

    def start(self):
        """The entry point for the script. Runs the entire logic depending on the arguments."""
        # Set all argument flags, based on runtime arguments.
        self._available_arguments()
        # Check if we want to skip the menu or not.
        if not self.skip_menu:
            self._main_menu()
        # Skip the menu.
        else:
            # TODO: Add ability to skip every menu. Not only ALL
            # Check if we ONLY typed `python client.py -s/--skip-menu`
            # If check is True, set self.all = True then run FIC main logic.
            # We don't need to check if "-s/--skip-menu" is present, as this is the only way to
            # enter this else statement.
            self.all = True if len(sys.argv) <= 2 else self.all
            self.run_fic(all=self.all, git_only=self.git_only, hg_only=self.hg_only,
                         days=self.days, logging=self.logging)

    def _available_arguments(self):
        """This method reads and sets all the argument flags."""
        self.parser.add_argument('-a', '--all', required=False, action='store_true', default=False,
                                 help='Runs script for all available repositories')
        self.parser.add_argument('-g', '--git', required=False, action='store_true', default=False,
                                 help='Runs script only for repos that are on GitHub')
        self.parser.add_argument('-hg', '--mercurial', required=False, action='store_true', default=False,
                                 help='Runs script only for repos that are on Mercurial')
        self.parser.add_argument("-r", "--repo", required=False, nargs="*",
                                 help="Let the user choose for which repositories to run")
        self.parser.add_argument("-l", "--logging", required=False, action='store_true', default=False,
                                 help="Activate logger output in the console")
        self.parser.add_argument("-days", "--days", required=False, action='store', default=DEFAULT_DAYS,
                                 help="Generate the changelog.md for <int> amount of days.")
        self.parser.add_argument("-p", "--push", required=False, action='store_true', default=False,
                                 help="Runs for all available repositories and auto-push to github")
        self.parser.add_argument("-dev", "--development", required=False, action='store_true', default=False,
                                 help="Activate development mode")
        self.parser.add_argument("-s", "--skip-menu", required=False, action="store_true", default=False,
                                 help="Skip MainMenu. Used for automatization.")
        self.args = self.parser.parse_args()
        self._set_arguments_flags()

    def _set_arguments_flags(self):
        """This method changes the flag states depending on the arguments."""
        # Check that we have parsed all arguments.
        if not self.args:
            self._available_arguments()
        else:
            pass
        # Create and set flags.
        if self.args.all:
            self.all = True
        if self.args.git:
            self.git_only = True
        if self.args.mercurial:
            self.hg_only = True
        # Check if Manual Repo Selection is present and in which mode:
        # - If `-r` is missing. (Return: False)
        # - If `-r` is present, but no list present. (Return: True)
        # - If `-r` is present and a list of repos is present. (Return: List of repos)
        repo_selection = False if isinstance(self.args.repo, type(None)) else self.args.repo
        if repo_selection:
            self.repo_selection = self.args.repo
        if self.args.logging:
            self.logging = True
        if self.args.days:
            if str(self.args.days).isdecimal():
                self.days = int(self.args.days)
            else:
                print("When using -days/--days please insert a number of days.\n"
                      "Example: python3 client.py -days 30 or --days 10")
                exit(4)
        if self.args.push:
            self.push = True
        if self.args.development:
            self.dev = True
        if self.args.skip_menu:
            self.skip_menu = True
        self.arguments_set = True

    def _construct_mainmenu_text(self):
        """Creates the main-menu content and prepares it to be displayed.

        :return: the main menu text
        """
        if not self.arguments_set:
            self._set_arguments_flags()
        else:
            pass
        menu_header = "Welcome to Ciduty's Firefox Infra Changelog!\n" \
                      "You can use the options below to run the script according to your needs.\n"
        menu_logging = "==== Logging is active ====\n"
        menu_dev = "==== Dev Mode is active ====\n"
        menu_days = f"==== Generating Changelog for {self.days} days ====\n"
        menu_notifications = (menu_logging if self.logging else "") + \
                             (menu_dev if self.dev else "") + \
                             (menu_days if self.days != DEFAULT_DAYS else "")
        menu_options = "1. Run script for all available repositories \n" \
                       "2. Run script only for repositories that are on GitHub\n" \
                       "3. Run script only for repositories that are on Mercurial\n" \
                       "4. Run script for repositories that you choose\n" \
                       "5. Activates logger output in console\n" \
                       "6. Generates changelog.md for the amount of days set by user\n" \
                       "7. Run the script for all repositories and push changes to Github\n" \
                       "0. Exit application."
        return menu_header + menu_notifications + menu_options

    def _main_menu(self):
        """This method prints the main menu and reads the chosen option."""
        print(self._construct_mainmenu_text())
        self.choice = int(input())
        self._run_selected_menu(choice=self.choice)

    def _run_selected_menu(self, choice):
        """This method calls the run_fic method depending on the chosen option.

        :param choice: the option chosen by the user
        """
        if choice == 1:
            self.LOGGER.info(f"Script running for choice {choice}: ALL Repositories.")
            self.run_fic(all=True, logging=self.logging, days=self.days)
        if choice == 2:
            self.LOGGER.info(f"Script running for choice {choice}: Git Repositories Only.")
            self.run_fic(git_only=True, logging=self.logging, days=self.days)
        if choice == 3:
            self.LOGGER.info(f"Script running for choice {choice}: HG Repositories Only.")
            self.run_fic(hg_only=True, logging=self.logging, days=self.days)
        if choice == 4:
            self.LOGGER.info(f"Script running for choice {choice}: Custom Repositories.")
            self._repo_selection_menu()
            self.run_fic(repo_list=self.repo_selection)
        if choice == 5:
            self.logging = not self.logging
            if self.logging:
                self.LOGGER.info("Console Logging has been activated.")
            else:
                self.LOGGER.info("Console Logging has been deactivated.")
            self._main_menu()
        if choice == 6:
            print("Please input the amount of days `changelog.md` will be generated for:")
            days = input()
            if str(days).isdecimal():
                self.days = int(days)
                self.LOGGER.info(f"DEFAULT_DAYS parameter has been changed to: {self.days} day(s)")
                self._main_menu()
            else:
                print("Amount of days needs to be an integer!\n"
                      "Moving back to Main Menu.")
                self._main_menu()
        if choice == 7:
            self.LOGGER.info(f"Script running for choice {choice}: ALL Repositories and PUSH changes to GitHub")
            self.run_fic(all=True, push=True, logging=self.logging, days=self.days)
        if choice == 0:
            exit()

    def _repo_selection_menu(self):
        """Loads available repositories and prepares them for user selection."""
        repo_list = json.load(self.load(None, REPOSITORIES_FILE))
        temp_list = []
        # Argument "-r" provided, but no list of repositories is included.
        # Enter Selection Menu.
        if not self.repo_selection or (len(self.repo_selection) == 0):
            self._construct_repo_selection(repo_list)
        # Argument "-r" provided and list of repositories is included.
        # Skip Selection Menu
        else:
            for key in repo_list:
                for repo in repo_list.get(key):
                    for selection in self.repo_selection:
                        if int(selection) == repo_list.get(key).get(repo).get("order"):
                            temp_list.append((int(selection), repo, key))
            self.repo_selection = []
            for _, repo, key in temp_list:
                self.repo_selection.append((repo, key))

    def _construct_repo_selection(self, repo_list):
        """The method that creates the list of the repositories chosen by the user."""
        temp_list = []
        self.repo_selection = []
        for key in repo_list:
            for repo in repo_list.get(key):
                temp_list.append((repo_list.get(key).get(repo).get("order"), repo, key))
        print("Available Repositories:")
        for entry in sorted(temp_list):
            print(entry[0], entry[1])
        print("Enter the number of the repository(ies) you want to run, separated by comma.\n"
              "Example: 1, 5, 20, 3, 2")
        choices = input()
        choices = choices.split(",")
        self.repo_selection = []
        for key in repo_list:
            for repo in repo_list.get(key):
                for choice in choices:
                    if int(choice) == repo_list.get(key).get(repo).get("order"):
                        self.repo_selection.append((repo, key))
python
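# The FICMainMenu class above funnels its argparse flags into run_fic(). A minimal,
# standalone sketch of the same flag pattern and the `-s` shortcut; the 30-day default
# is only a stand-in for DEFAULT_DAYS and nothing here touches the real modules.
import argparse
import sys

def build_parser() -> argparse.ArgumentParser:
    # Same boolean-flag style as FICMainMenu._available_arguments().
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--all', action='store_true', help='run for all repositories')
    parser.add_argument('-s', '--skip-menu', action='store_true', help='skip the interactive menu')
    parser.add_argument('-days', '--days', type=int, default=30, help='days covered by changelog.md')
    return parser

if __name__ == '__main__':
    args = build_parser().parse_args()
    if args.skip_menu:
        # Mirrors the `len(sys.argv) <= 2` shortcut: `-s` alone implies running for all repositories.
        run_all = args.all or len(sys.argv) <= 2
        print(f'would run with all={run_all}, days={args.days}')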
from django import forms
from django.contrib.admin import widgets
import os

# Choices must be an ordered sequence of (value, label) pairs; a set literal would give
# them an arbitrary order. Labels: キュート = Cute, クール = Cool, パッション = Passion.
CHOICE = (
    ('0', 'キュート'),
    ('1', 'クール'),
    ('2', 'パッション'),
)


class SampleForm(forms.Form):
    # label 属性 means "attribute"; initial must match a choice key, so use the string '0'.
    select = forms.ChoiceField(label='属性',
                               widget=forms.RadioSelect,
                               choices=CHOICE,
                               initial='0')
python
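# For context, a sketch of how a form like SampleForm is typically consumed in a Django
# view; the view and template names here are illustrative, not part of the original snippet.
from django.shortcuts import render
from .forms import SampleForm

def attribute_view(request):
    if request.method == 'POST':
        form = SampleForm(request.POST)
        if form.is_valid():
            selected = form.cleaned_data['select']  # '0', '1' or '2'
            # ... act on the selection ...
    else:
        form = SampleForm()
    return render(request, 'sample.html', {'form': form})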
L=[[[*map(int,v.split(','))]for v in l.split('->')]for l in open("inputday5")]
r,m=lambda n,x,y:x<=n<=y or y<=n<=x,max(max(max(p)for p in l)for l in L)+1
c=lambda a,b,f,s,w:(r(a,f[0],s[0])and r(b,f[1],s[1])and(f[0]==s[0]or f[1]==s[1]or(w and abs(f[0]-a)==abs(f[1]-b))))
print(*(sum(a)for a in((sum(c(i,j,f,s,b)for f,s in L)>1 for j in range(m)for i in range(m))for b in(0,1))))
python
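# The one-liner above is code-golfed Advent of Code 2021 day 5 (hydrothermal vents).
# A readable equivalent, assuming the same "x1,y1 -> x2,y2" lines in inputday5, that
# counts overlaps with a Counter instead of scanning an m-by-m grid:
from collections import Counter

def count_overlaps(path="inputday5", diagonals=False):
    hits = Counter()
    for line in open(path):
        (x1, y1), (x2, y2) = [tuple(map(int, p.split(','))) for p in line.split('->')]
        dx = (x2 > x1) - (x2 < x1)          # step of -1, 0 or +1 per axis
        dy = (y2 > y1) - (y2 < y1)
        if dx and dy and not diagonals:     # part 1 ignores diagonal vents
            continue
        for i in range(max(abs(x2 - x1), abs(y2 - y1)) + 1):
            hits[(x1 + i * dx, y1 + i * dy)] += 1
    return sum(v > 1 for v in hits.values())

print(count_overlaps(), count_overlaps(diagonals=True))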
# Filename: HCm_UV_v5.0.py ##################### ###### IMPORTS ###### ##################### import string import numpy as np import sys #sys.stderr = open('errorlog.txt', 'w') import warnings warnings.filterwarnings("ignore") ####################### ###### FUNCTIONS ###### ####################### #Function for interpolation of grids def interpolate(grid,z,zmin,zmax,n): #Columns of the library n_comments = 0 with open('Libraries_uv/C17_POPSTAR_1myr_uv.dat', 'r') as file1: for line in file1: if line[0] == '#': n_comments += 1 auxiliar_labels = np.genfromtxt('Libraries_uv/C17_POPSTAR_1myr_uv.dat', dtype=None, names=True, encoding = 'ascii', skip_header=n_comments).dtype.names ncol = len(auxiliar_labels) vec = [] if z == 2: label_z = 'logU' if z == 1: label_z = 'logCO' if z == 0: label_z = '12logOH' type_list_names = [] for col in auxiliar_labels: inter = 0 no_inter = 0 type_list_names.append((col, float)) for row in range(0,len(grid)): if grid[label_z][row] < zmin or grid[label_z][row] > zmax: continue if z == 2: x = '12logOH'; y = 'logCO' if z == 1: x = '12logOH'; y = 'logU' if z == 0: x = 'logCO'; y = 'logU' if row == (len(grid)-1): vec.append(grid[col][row]) no_inter = no_inter + 1 elif grid[x][row] < grid[x][row+1] or grid[y][row] < grid[y][row+1] : vec.append(grid[col][row]) no_inter = no_inter + 1 else: inter = inter + 1 for index in range(0,n): i = grid[col][row]+(index)*(grid[col][row+1]-grid[col][row])/n vec.append(i) out_aux = np.transpose(np.reshape(vec,(-1,n*inter+no_inter))) out = np.zeros(out_aux.shape[0], dtype=type_list_names) for col_n in range(0, len(auxiliar_labels)): out[auxiliar_labels[col_n]] = out_aux[:, col_n] return out ################################ ###### INITIAL ITERATIONS ###### ################################ #Description of the code print ('-------------------------------------------------') print ('This is HII-CHI-mistry for UV version 5.0') print ('See Perez-Montero, & Amorin (2017) for details') print ('Insert the name of your input text file with some or all of the following columns:') print (' Lya 1216') print (' NV] 1239') print (' CIV 1549') print (' HeII 1640') print (' OIII 1665') print (' CIII 1909') print (' Hb 4861') print (' OIII 5007') print ('in arbitrary units and reddening corrected. 
Each column must be given with labels for the lines and their corresponding flux errors.') print ('-------------------------------------------------') # Input file reading if len(sys.argv) == 1: if int(sys.version[0]) < 3: input00 = raw_input('Insert input file name:') else: input00 = input('Insert input file name:') else: input00 = str(sys.argv[1]) try: #Counting comments: n_comments = 0 with open(input00, 'r') as file2: for line in file2: if line[0] == '#': n_comments += 1 input0 = np.genfromtxt(input00,dtype=None,names=True, encoding = 'ascii', skip_header = n_comments) print ('The input file is:'+input00) except: print ('Input file error: It does not exist or has wrong format') sys.exit print ('') if input0.size == 1: input1 = np.stack((input0,input0)) else: input1 = input0 # Iterations for Montecarlo error derivation if len(sys.argv) < 3: n = 25 else: n = int(sys.argv[2]) print ('The number of iterations for MonteCarlo simulation is: ',n) print ('') ############################################# ###### SELECTION OF THE GRID OF MODELS ###### ############################################# #Interface with the user print ('') question = True while question: print ('-------------------------------------------------') print ('Default SEDs') print ('------------') print ('(1) POPSTAR with Chabrier IMF, age = 1 Myr') print ('(2) BPASS v.2.1 a_IMF = 1.35, Mup = 300, age = 1Myr with binaries') print ('(3) AGN, double component, a(UV) = -1.0') print ('') print ('Other SED') print ('---------') print ('(4) Different library') print ('-------------------------------------------------') if int(sys.version[0]) < 3: sed = raw_input('Choose SED of the models: ') else: sed = input('Choose SED of the models: ') if sed == '1' or sed == '2' or sed == '3' or sed == '4': question = False print ('') #Further questions on the AGN models if sed == '3': #SLOPE ALPHA question = True while question: if int(sys.version[0]) < 3: alpha = raw_input('Choose value for alpha(OX) in the AGN models: [1] -0.8 [2] -1.2: ') else: alpha = input('Choose value for alpha(OX) in the AGN models: [1] -0.8 [2] -1.2: ') if alpha == '1' or alpha == '2': question = False print ('') #FRACTION OF FREE ELECTRONS question = True while question: if int(sys.version[0]) < 3: efrac = raw_input('Choose stop criterion in the AGN models: [1] 2% free electrons [2] 98% free electrons: ') else: efrac = input('Choose stop criterion in the AGN models: [1] 2% free electrons [2] 98% free electrons: ') if efrac == '1' or efrac == '2': question = False #Presence or absence of dust in the models question = True while question: if int(sys.version[0]) < 3: grains = raw_input('Choose AGN models with [1] or without [2] dust grains: ') else: grains = input('Choose AGN models with [1] or without [2] dust grains: ') if grains == '1' or grains == '2': question = False print ('') #Particular file introduced by the user if sed == '4': question = True while question: print ('Introduce name of the file containing the models. It must be located in the folder "Libraries_uv".') print (' ') if int(sys.version[0]) < 3: new_library = raw_input('Name of file: ') else: new_library = input('Name of file: ') #Searching for the file try: #Counting comments: n_comments = 0 with open('Libraries_uv/'+new_library, 'r') as file3: for line in file3: if line[0] == '#': n_comments += 1 library_user = np.genfromtxt('Libraries_uv/'+new_library, dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) print (' ') print ('Loading library '+new_library+'. 
Checking correct format of the file.') question = False except: print (' ') print ('Library was not found in folder "Libraries_uv" or file does not exist.') question = True while question: try: #Counting comments: n_comments = 0 with open('Libraries_uv/'+new_library, 'r') as file4: for line in file4: if line[0] == '#': n_comments += 1 library_user = np.genfromtxt('Libraries_uv/'+new_library, dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) #Checking correct format: #Counting comments: n_comments = 0 with open('Libraries_uv/C17_POPSTAR_1myr_uv.dat', 'r') as file5: for line in file5: if line[0] == '#': n_comments += 1 auxiliar_labels = np.genfromtxt('Libraries_uv/C17_POPSTAR_1myr_uv.dat', dtype=None, names=True, encoding = 'ascii', skip_header=n_comments).dtype.names missing_labels = [] for label in auxiliar_labels: if label in library_user.dtype.names: continue else: missing_labels.append(label) #Displaying message for the user: print('Succesfully reading of the file') if len(missing_labels) == 0: print ('File presents the correct format') question = False else: print ('File does not present the correct format. The following columns are missing:') for need_label in missing_labels: print('- '+need_label) print ('More details on the correct format for the library are found in readme file.') print (' ') print ('Reintroduce name of the file with fixed format:') print (' ') if int(sys.version[0]) < 3: new_library = raw_input('Name of file: ') else: new_library = input('Name of file: ') except: print ('Something went wrong while reading file. Please, reintroduce name of the file:') print ('') if int(sys.version[0]) < 3: new_library = raw_input('Name of file: ') else: new_library = input('Name of file: ') #Interpolation in the grid of models question = True print ('') while question: if int(sys.version[0]) < 3: inter = raw_input('Choose models [0] No interpolated [1] Interpolated: ') else: inter = input('Choose models [0] No interpolated [1] Interpolated: ') if inter == '0' or inter == '1': question = False print ('') sed = int(sed) inter = int(inter) alpha = int(alpha) efrac = int(efrac) grains = int(grains) #POPSTAR MODEL if sed==1: file_lib = 'C17_POPSTAR_1myr_uv.dat' #Counting comments: n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file6: for line in file6: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF. No interpolation.' print ('No interpolation for the POPSTAR models is going to be used.') print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.') print ('') res_CO = 0.125 elif inter == 1: sed_type = 'POPSTAR, age = 1 Myr, Chabrier IMF. Interpolation.' print ('Interpolation for the POPSTAR models is going to be used.') print ('The grid has a resolution of 0.01dex for O/H and 0.0125dex for C/O.') print ('') res_CO = 0.125 #BPASS MODEL elif sed==2: file_lib = 'C17_BPASS_IMF135_mup300_1myr_uv.dat' #Counting comments: n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file7: for line in file7: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'BPASS a_IMF = 1.35, M_up = 300, age = 1Myr, with binaries. No interpolation.' 
print ('No interpolation for the BPASS models is going to be used.') print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.') print ('') res_CO = 0.125 elif inter == 1: sed_type = 'BPASS v.2.1, a_IMF = 1.35, M_up = 300, age = 1Myr. Interpolation.' print ('Interpolation for the BPASS models is going to be used.') print ('The grid has a resolution of 0.01dex for O/H and 0.0125dex for C/O.') print ('') res_CO = 0.125 #AGN MODEL FOR alpha_OX = -0.8, efrac = 2%, with dust grains elif sed==3 and alpha ==1 and efrac == 1 and grains == 1: file_lib = 'C17_AGN_alpha08_efrac02_CNfix_grains_uv.dat' #Counting comments: n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file8: for line in file8: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'Double composite AGN, a(OX) = -0.8 and free electron fraction = 2% with dust grains. No interpolation.' print ('No interpolation for the AGN a(ox) = -0.8 with 2% free electrons and dust grains models is going to be used.') print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.') res_CO = 0.125 elif inter == 1: sed_type = 'Double composite AGN, a(OX) = -0.8, free electron fraction = 2% and with dust grains. Interpolation.' print ('Interpolation for the AGN a(ox) = -0.8, 2% free electrons and with dust models is going to be used.') print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.') res_CO = 0.125 #AGN MODEL FOR alpha_OX = -0.8, efrac = 2%, without dust grains elif sed==3 and alpha ==1 and efrac == 1 and grains == 2: file_lib = 'C17_AGN_alpha08_efrac02_CNfix_nograins_uv.dat' #Counting comments: n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file8: for line in file8: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'Double composite AGN, a(OX) = -0.8 and free electron fraction = 2% without dust grains. No interpolation.' print ('No interpolation for the AGN a(ox) = -0.8 with 2% free electrons models without grains is going to be used.') print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.') res_CO = 0.125 elif inter == 1: sed_type = 'Double composite AGN, a(OX) = -0.8, free electron fraction = 2% and without dust grains. Interpolation.' print ('Interpolation for the AGN a(ox) = -0.8, 2% free electrons and without dust models is going to be used.') print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.') res_CO = 0.125 #AGN MODEL FOR alpha_OX = -0.8, efrac = 98%, with dust grains elif sed==3 and alpha ==1 and efrac == 2 and grains == 1: file_lib = 'C17_AGN_alpha08_efrac98_CNfix_grains_uv.dat' #Counting comments: n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file8: for line in file8: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'Double composite AGN, a(OX) = -0.8 and free electron fraction = 98% with dust grains. No interpolation.' 
print ('No interpolation for the AGN a(ox) = -0.8 with 98% free electrons and dust grains models is going to be used.') print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.') res_CO = 0.125 elif inter == 1: sed_type = 'Double composite AGN, a(OX) = -0.8, free electron fraction = 98% and with dust grains. Interpolation.' print ('Interpolation for the AGN a(ox) = -0.8, 98% free electrons and with dust models is going to be used.') print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.') res_CO = 0.125 #AGN MODEL FOR alpha_OX = -0.8, efrac = 98%, without dust grains elif sed==3 and alpha ==1 and efrac == 2 and grains == 2: file_lib = 'C17_AGN_alpha08_efrac98_CNfix_nograins_uv.dat' #Counting comments: n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file8: for line in file8: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'Double composite AGN, a(OX) = -0.8 and free electron fraction = 98% without dust grains. No interpolation.' print ('No interpolation for the AGN a(ox) = -0.8 with 98% free electrons models without grains is going to be used.') print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.') res_CO = 0.125 elif inter == 1: sed_type = 'Double composite AGN, a(OX) = -0.8, free electron fraction = 98% and without dust grains. Interpolation.' print ('Interpolation for the AGN a(ox) = -0.8, 98% free electrons and without dust models is going to be used.') print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.') res_CO = 0.125 #AGN MODEL FOR alpha_OX = -1.2, efrac = 2%, with dust grains elif sed==3 and alpha ==2 and efrac == 1 and grains == 1: file_lib = 'C17_AGN_alpha12_efrac02_CNfix_grains_uv.dat' #Counting comments: n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file8: for line in file8: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'Double composite AGN, a(OX) = -1.2 and free electron fraction = 2% with dust grains. No interpolation.' print ('No interpolation for the AGN a(ox) = -1.2 with 2% free electrons and dust grains models is going to be used.') print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.') res_CO = 0.125 elif inter == 1: sed_type = 'Double composite AGN, a(OX) = -1.2, free electron fraction = 2% and with dust grains. Interpolation.' print ('Interpolation for the AGN a(ox) = -1.2, 2% free electrons and with dust models is going to be used.') print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.') res_CO = 0.125 #AGN MODEL FOR alpha_OX = -1.2, efrac = 2%, without dust grains elif sed==3 and alpha ==2 and efrac == 1 and grains == 2: file_lib = 'C17_AGN_alpha12_efrac02_CNfix_nograins_uv.dat' #Counting comments: n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file8: for line in file8: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'Double composite AGN, a(OX) = -1.2 and free electron fraction = 2% without dust grains. No interpolation.' 
print ('No interpolation for the AGN a(ox) = -1.2 with 2% free electrons models without grains is going to be used.') print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.') res_CO = 0.125 elif inter == 1: sed_type = 'Double composite AGN, a(OX) = -1.2, free electron fraction = 2% and without dust grains. Interpolation.' print ('Interpolation for the AGN a(ox) = -1.2, 2% free electrons and without dust models is going to be used.') print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.') res_CO = 0.125 #AGN MODEL FOR alpha_OX = -1.2, efrac = 98%, with dust grains elif sed==3 and alpha ==2 and efrac == 2 and grains == 1: file_lib = 'C17_AGN_alpha12_efrac98_CNfix_grains_uv.dat' #Counting comments: n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file8: for line in file8: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'Double composite AGN, a(OX) = -1.2 and free electron fraction = 98% with dust grains. No interpolation.' print ('No interpolation for the AGN a(ox) = -1.2 with 98% free electrons and dust grains models is going to be used.') print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.') res_CO = 0.125 elif inter == 1: sed_type = 'Double composite AGN, a(OX) = -1.2, free electron fraction = 98% and with dust grains. Interpolation.' print ('Interpolation for the AGN a(ox) = -1.2, 98% free electrons and with dust models is going to be used.') print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.') res_CO = 0.125 #AGN MODEL FOR alpha_OX = -1.2, efrac = 98%, without dust grains elif sed==3 and alpha ==2 and efrac == 2 and grains == 2: file_lib = 'C17_AGN_alpha12_efrac98_CNfix_nograins_uv.dat' #Counting comments: n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file8: for line in file8: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'Double composite AGN, a(OX) = -1.2 and free electron fraction = 98% without dust grains. No interpolation.' print ('No interpolation for the AGN a(ox) = -1.2 with 98% free electrons models without grains is going to be used.') print ('The grid has a resolution of 0.1dex for O/H and 0.125dex for C/O.') res_NO = 0.125 elif inter == 1: sed_type = 'Double composite AGN, a(OX) = -1.2, free electron fraction = 98% and without dust grains. Interpolation.' 
print ('Interpolation for the AGN a(ox) = -1.2, 98% free electrons and without dust models is going to be used.') print ('The grid has a resolution of 0.01 dex for O/H and 0.0125 dex for C/O.') res_NO = 0.125 #Different library elif sed==4: file_lib = new_library #Counting comments: n_comments = 0 with open('Libraries_uv/'+new_library, 'r') as file8: for line in file8: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+new_library,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if inter == 0: sed_type = 'User file ' + new_library + ' used as library for the models no interpolated' print ('No interpolation for the library '+new_library) res_CO = 0.125 elif inter == 1: sed_type = 'User file ' + new_library + ' used as library for the models interpolated' print ('Interpolation for the library '+new_library) res_CO = 0.125 #Valuable columns of the files uv_lin = ['12logOH', 'logCO', 'logU', 'Lya_1216', 'CIV_1549', 'HeII_1640', 'OIII_1665', 'CIII_1909', 'OIII_5007'] lin_uv_label = ['12+log(O/H)', 'log(C/O)', 'log(U)', 'Lya_1216', 'CIV_1549', 'HeII_1640', 'OIII_1665', 'CIII_1909', 'OIII_5007'] ######################################## ###### SORTING THE GRID OF MODELS ###### ######################################## print (' ') print ('Sorting the grid of models') print (' ') index_OH_CO_U_sorted = [] #storing the correct order of the indexes #Sorting abundances 12+log(O/H) OH_values = grid_aux['12logOH'] #Oxygen abundances if len(OH_values) != 1: sorted_list_OH = sorted(range(len(OH_values)),key=OH_values.__getitem__) if len(OH_values) == 1: sorted_list_OH = [0] #Sorting abundance ratios log(C/O) OH_values_diff = list(set(OH_values[sorted_list_OH])) OH_values_diff.sort() #It is necessary to sort again the list of different elements for OH_num in OH_values_diff: index_OH_fix = np.where(OH_values == OH_num)[0] #Index(es) for a particular abundance 12+log(O/H) CO_values = grid_aux['logCO'][index_OH_fix] if len(CO_values) != 1: sorted_list_CO = sorted(range(len(CO_values)), key=CO_values.__getitem__) if len(CO_values) == 1: sorted_list_CO = [0] CO_values_diff = list(set(CO_values[sorted_list_CO])) CO_values_diff.sort() #It s necessary to sort again the list of different elements for CO_num in CO_values_diff: index_OH_CO_fix = np.where(CO_values == CO_num)[0] #Index(es) for particular abundances 12+log(O/H) and log(C/O) #Sorting ionization parameters U_values = grid_aux['logU'][index_OH_fix[index_OH_CO_fix]] if len(U_values) != 1: sorted_list_U = sorted(range(len(U_values)), key=U_values.__getitem__) if len(U_values) == 1: sorted_list_U = [0] index_OH_CO_U = index_OH_fix[index_OH_CO_fix[sorted_list_U]] #Sorted index(es) for U at fixed O/H and C/O for index_sort in index_OH_CO_U: index_OH_CO_U_sorted.append(index_sort) #Adding index in the correct order #Generating new library file list_comments = [] #Storing comments in the file: with open('Libraries_uv/'+file_lib, 'r') as file_aux: for line in file_aux: if line[0] == '#': list_comments.append(line) #Storing columns: lin_uv_col = [] #Retrieving each column of the grid for label in uv_lin: aux_col = grid_aux[label].tolist() lin_uv_col.append(aux_col) #Comments grid_to_write = open('Libraries_uv/'+file_lib, 'w') for line_com in list_comments: grid_to_write.write(line_com) #Header line label_line = '{:15} '.format(lin_uv_label[0].replace(' ','')) for ind in range(1, len(lin_uv_label)-1): label_line += '\t {:15} '.format(lin_uv_label[ind].replace(' ','')) label_line += '\t 
{:15}\n'.format(lin_uv_label[-1].replace(' ','')) grid_to_write.write(label_line) #Values: for ind_val in index_OH_CO_U_sorted: val_line = '{:7.7f} '.format(lin_uv_col[0][ind_val]) for ind2 in range(1, len(lin_uv_label)-1): val_line += '\t {:7.7f} '.format(lin_uv_col[ind2][ind_val]) val_line += '\t {:7.7f}\n'.format(lin_uv_col[-1][ind_val]) grid_to_write.write(val_line) grid_to_write.close() #Opening sorted grid of models n_comments = 0 with open('Libraries_uv/'+file_lib, 'r') as file12: for line in file12: if line[0] == '#': n_comments += 1 grid_aux = np.genfromtxt('Libraries_uv/'+file_lib, dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) ################################################ ###### CONSTRAINTS FOR THE GRID OF MODELS ###### ################################################ #Reading constraints and creating library with constraints print (' ') print ('Select a file with the constraint laws to be used to limit the grid of models when the measurement of a quantity is impossible without any relation.') print (' ') print ('') question = True while question: print ('-------------------------------------------------') print ('Default constraints') print ('-------------------') print ('(1) Constraints for Star-Forming Galaxies') print ('(2) Constraints for Extreme Emission Line Galaxies') print ('(3) Constraints for AGNs (no restriction in the ionization parameter)') print ('') print ('Other constraints') print ('-----------------') print ('(4) Different constraint file') print ('-------------------------------------------------') if int(sys.version[0]) < 3: const = raw_input('Choose constraint for the grids: ') else: const = input('Choose constraint for the grids: ') if const == '1' or const == '2' or const == '3' or const == '4': question = False print ('') #Particular file introduced by the user if const == '4': question = True while question: print ('Introduce name of the file containing the constraints for the grids. It must be located in the folder "Constraints".') print (' ') if int(sys.version[0]) < 3: new_const = raw_input('Name of file: ') else: new_const = input('Name of file: ') #Searching for the file try: #Counting comments: n_comments = 0 with open('Constraints/'+new_const, 'r') as file9: for line in file9: if line[0] == '#': n_comments += 1 const_user = np.genfromtxt('Constraints/'+new_const, dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) print (' ') print ('Loading constraint file '+new_const+'. 
Checking correct format of the file.') question = False except: print (' ') print ('File was not found in folder "Constraints" or file does not exist.') question = True while question: try: #Counting comments: n_comments = 0 with open('Constraints/'+new_const, 'r') as file10: for line in file10: if line[0] == '#': n_comments += 1 const_user = np.genfromtxt('Constraints/'+new_const, dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) #Checking correct format: #Counting comments: n_comments = 0 with open('Constraints/template_OH.dat', 'r') as file11: for line in file11: if line[0] == '#': n_comments += 1 auxiliar_labels = np.genfromtxt('Constraints/template_OH.dat', dtype=None, names=True, encoding = 'ascii', skip_header=n_comments).dtype.names missing_labels = [] for label in auxiliar_labels: if label in const_user.dtype.names: continue else: missing_labels.append(label) #Displaying message for the user: print ('Succesfully reading of the file') if len(missing_labels) == 0: print ('File presents the correct format') question = False else: print ('File does not present the correct format. The following columns are missing:') for need_label in missing_labels: print('- '+need_label) print ('More details on the correct format for the library are found in readme file.') print (' ') print ('Reintroduce name of the file with fixed format:') print (' ') if int(sys.version[0]) < 3: new_const = raw_input('Name of file: ') else: new_const = input('Name of file: ') except: print ('Something went wrong while reading file. Please, reintroduce name of the file:') print (' ') if int(sys.version[0]) < 3: new_const = raw_input('Name of file: ') else: new_const = input('Name of file: ') #Generation of grids with constraints laws: if const == '1' or const == '2' or const == '3' or const == '4': #First grid does not change grid1 = grid_aux file_lib_2 = file_lib #Generating libraries for the constraints in the files if const == '1': #Star-Forming Galaxies const_file = 'template_OH.dat' name_const = 'Constraints/template_OH.dat' n_comments = 0 with open(name_const, 'r') as file12: for line in file12: if line[0] == '#': n_comments += 1 const_data = np.genfromtxt(name_const,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if const == '2': const_file = 'template_OH_eelg.dat' name_const = 'Constraints/template_OH_eelg.dat' n_comments = 0 with open(name_const, 'r') as file13: for line in file13: if line[0] == '#': n_comments += 1 const_data = np.genfromtxt(name_const,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if const == '3': name_const = 'Constraints/template_OH_agn.dat' const_file = 'template_OH_agn.dat' n_comments = 0 with open(name_const, 'r') as file18: for line in file18: if line[0] == '#': n_comments += 1 const_data = np.genfromtxt(name_const,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) if const == '4': const_file = new_const name_const = 'Constraints/'+new_const n_comments = 0 with open(name_const, 'r') as file14: for line in file14: if line[0] == '#': n_comments += 1 const_data = np.genfromtxt(name_const,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) #Limiting the grids: lin_uv_val = [] #The initial grid need to be constrained in the ionization parameter #Retrieving each column of the grid for label in uv_lin: aux_col = grid1[label].tolist() lin_uv_val.append(aux_col) #Creation of the grids name_OH_U = '.'.join(file_lib_2.split('.')[0:-1])+'_OH_U_constrained.'+file_lib.split('.')[-1] name_OH_U_CO = 
'.'.join(file_lib_2.split('.')[0:-1])+'_OH_U_CO_constrained.'+file_lib.split('.')[-1] file_open = open('Libraries_uv/'+ name_OH_U, 'w') #OH and U relation file_open_2 = open('Libraries_uv/'+name_OH_U_CO, 'w') #OH, CO and U relation file_open.write('#Constrained by relation between 12+log(O/H) and log(U)\n') file_open_2.write('#Constrained by relation between 12+log(O/H), log(U) and log(C/O)\n') #Header line label_line = '{:15} '.format(lin_uv_label[0].replace(' ','')) for ind in range(1, len(lin_uv_label)-1): label_line += '\t {:15} '.format(lin_uv_label[ind].replace(' ','')) label_line += '\t {:15}\n'.format(lin_uv_label[-1].replace(' ','')) file_open.write(label_line) file_open_2.write(label_line) #Values: for ind_val in range(0, len(lin_uv_val[0])): index_desired = np.where(const_data['12logOH'] == lin_uv_val[0][ind_val])[0][0] #Searching for constrain in given value of O/H if lin_uv_val[2][ind_val] <= const_data['logU_max'][index_desired] and lin_uv_val[2][ind_val] >= const_data['logU_min'][index_desired]: val_line = '{:7.7f} '.format(lin_uv_val[0][ind_val]) for ind2 in range(1, len(lin_uv_label)-1): val_line += '\t {:7.7f} '.format(lin_uv_val[ind2][ind_val]) val_line += '\t {:7.7f}\n'.format(lin_uv_val[-1][ind_val]) file_open.write(val_line) if lin_uv_val[2][ind_val] <= const_data['logU_max'][index_desired] and lin_uv_val[2][ind_val] >= const_data['logU_min'][index_desired] and lin_uv_val[1][ind_val] <= const_data['logCO_max'][index_desired] and lin_uv_val[1][ind_val] >= const_data['logCO_min'][index_desired]: val_line = '{:7.7f} '.format(lin_uv_val[0][ind_val]) for ind2 in range(1, len(lin_uv_label)-1): val_line += '\t {:7.7f} '.format(lin_uv_val[ind2][ind_val]) val_line += '\t {:7.7f}\n'.format(lin_uv_val[-1][ind_val]) file_open_2.write(val_line) file_open.close() file_open_2.close() #Counting comments: n_comments = 0 with open('Libraries_uv/'+name_OH_U, 'r') as file15: for line in file15: if line[0] == '#': n_comments += 1 grid2 = np.genfromtxt('Libraries_uv/'+name_OH_U,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) n_comments = 0 with open('Libraries_uv/'+name_OH_U_CO, 'r') as file: for line in file: if line[0] == '#': n_comments += 1 grid3 = np.genfromtxt('Libraries_uv/'+name_OH_U_CO,dtype=None,names=True, encoding = 'ascii', skip_header=n_comments) #Residual in CO if inter==0: res_CO = np.max([sorted(set(grid1['logCO']))[ind+1]-sorted(set(grid1['logCO']))[ind] for ind in range(0, len(set(grid1['logCO']))-1)]) if inter==1: res_CO = np.max([sorted(set(grid1['logCO']))[ind+1]-sorted(set(grid1['logCO']))[ind] for ind in range(0, len(set(grid1['logCO']))-1)])/10 ########################################### ###### SUMMARY OF THE GRID OF MODELS ###### ########################################### print ('-------------------------------------------------') print ('Summary of the models') print ('---------------------') print ('Libraries generated with the constraints. 
The following grids are going to be used:') print ('- Full library (Grid#1): '+file_lib_2) print (' Total number of models: ' + str(len(grid1))) print ('- Library constrained by 12+log(O/H) - log(U) relation (Grid#2): '+name_OH_U) print (' Total number of models: ' + str(len(grid2))) print ('- Library constrained by 12+log(O/H) - log(U) - log(C/O) relation (Grid#3): '+name_OH_U_CO) print (' Total number of models: ' + str(len(grid3))) print ('-------------------------------------------------') print (' ') ################################################# ###### CREATING ARRAY TO STORE ESTIMATIONS ###### ################################################# grids = [] OHffs = [] eOHffs = [] COffs = [] eCOffs = [] logUffs = [] elogUffs = [] Label_ID = False Label_Lya = False Label_eLya = False Label_NV = False Label_eNV = False Label_CIV = False Label_eCIV = False Label_HeII = False Label_eHeII = False Label_OIII_1665 = False Label_eOIII_1665 = False Label_CIII = False Label_eCIII = False Label_OIII_5007 = False Label_eOIII_5007 = False Label_Hbeta = False Label_eHbeta = False #Checking input information for col in range(0,len(input1.dtype.names),1): if input1.dtype.names[col] == 'ID': Label_ID = True if input1.dtype.names[col] == 'Lya_1216': Label_Lya = True if input1.dtype.names[col] == 'eLya_1216': Label_eLya = True if input1.dtype.names[col] == 'NV_1239': Label_NV = True if input1.dtype.names[col] == 'eNV_1239': Label_eNV = True if input1.dtype.names[col] == 'CIV_1549': Label_CIV = True if input1.dtype.names[col] == 'eCIV_1549': Label_eCIV = True if input1.dtype.names[col] == 'HeII_1640': Label_HeII = True if input1.dtype.names[col] == 'eHeII_1640': Label_eHeII = True if input1.dtype.names[col] == 'OIII_1665': Label_OIII_1665 = True if input1.dtype.names[col] == 'eOIII_1665': Label_eOIII_1665 = True if input1.dtype.names[col] == 'CIII_1909': Label_CIII = True if input1.dtype.names[col] == 'eCIII_1909': Label_eCIII = True if input1.dtype.names[col] == 'Hb_4861': Label_Hbeta = True if input1.dtype.names[col] == 'eHb_4861': Label_eHbeta = True if input1.dtype.names[col] == 'OIII_5007': Label_OIII_5007 = True if input1.dtype.names[col] == 'eOIII_5007': Label_eOIII_5007 = True #Adapting final output with information from given input if Label_ID == False: Names = np.arange(1,input1.size+1,1) else: Names = input1['ID'] if Label_Lya == False: Lya_1216 = np.zeros(input1.size) else: Lya_1216 = input1['Lya_1216'] if Label_eLya == False: eLya_1216 = np.zeros(input1.size) else: eLya_1216 = input1['eLya_1216'] if Label_NV == False: NV_1239 = np.zeros(input1.size) else: NV_1239 = input1['NV_1239'] if Label_eNV == False: eNV_1239 = np.zeros(input1.size) else: eNV_1239 = input1['eNV_1239'] if Label_CIV == False: CIV_1549 = np.zeros(input1.size) else: CIV_1549 = input1['CIV_1549'] if Label_eCIV == False: eCIV_1549 = np.zeros(input1.size) else: eCIV_1549 = input1['eCIV_1549'] if Label_HeII == False: HeII_1640 = np.zeros(input1.size) else: HeII_1640 = input1['HeII_1640'] if Label_eHeII == False: eHeII_1640 = np.zeros(input1.size) else: eHeII_1640 = input1['eHeII_1640'] if Label_OIII_1665 == False: OIII_1665 = np.zeros(input1.size) else: OIII_1665 = input1['OIII_1665'] if Label_eOIII_1665 == False: eOIII_1665 = np.zeros(input1.size) else: eOIII_1665 = input1['eOIII_1665'] if Label_CIII == False: CIII_1909 = np.zeros(input1.size) else: CIII_1909 = input1['CIII_1909'] if Label_eCIII == False: eCIII_1909 = np.zeros(input1.size) else: eCIII_1909 = input1['eCIII_1909'] if Label_Hbeta == False: Hb_4861 = 
np.zeros(len(input1)) else: Hb_4861 = input1['Hb_4861'] if Label_eHbeta == False: eHb_4861 = np.zeros(input1.size) else: eHb_4861 = input1['eHb_4861'] if Label_OIII_5007 == False: OIII_5007 = np.zeros(input1.size) else: OIII_5007 = input1['OIII_5007'] if Label_eOIII_5007 == False: eOIII_5007 = np.zeros(input1.size) else: eOIII_5007 = input1['eOIII_5007'] ################################################################ ###### OUTPUT FORMAT AND INFORMATION: ONLY EMISSION LINES ###### ################################################################ #Creation of output only with information from inputs aux_list = [] aux_list.append(('ID','U12')) if Label_Lya == True: aux_list.append(('Lya_1216', float)) if Label_eLya == True: aux_list.append(('eLya_1216', float)) if Label_NV == True: aux_list.append(('NV_1239', float)) if Label_eNV == True: aux_list.append(('eNV_1239', float)) if Label_CIV == True: aux_list.append(('CIV_1549', float)) if Label_eCIV == True: aux_list.append(('eCIV_1549', float)) if Label_HeII == True: aux_list.append(('HeII_1640', float)) if Label_eHeII == True: aux_list.append(('eHeII_1640', float)) if Label_OIII_1665 == True: aux_list.append(('OIII_1665', float)) if Label_eOIII_1665 == True: aux_list.append(('eOIII_1665', float)) if Label_CIII == True: aux_list.append(('CIII_1909', float)) if Label_eCIII == True: aux_list.append(('eCIII_1909', float)) if Label_Hbeta == True: aux_list.append(('Hb_4861', float)) if Label_eHbeta == True: aux_list.append(('eHb_4861', float)) if Label_OIII_5007 == True: aux_list.append(('OIII_5007', float)) if Label_eOIII_5007 == True: aux_list.append(('eOIII_5007', float)) aux_list.append(('grid', int)) aux_list.append(('OH', float)) aux_list.append(('eOH', float)) aux_list.append(('CO', float)) aux_list.append(('eCO', float)) aux_list.append(('logU', float)) aux_list.append(('elogU', float)) output = np.zeros(input1.size, dtype=aux_list) output['ID'] = Names if Label_Lya == True: output['Lya_1216'] = Lya_1216 if Label_eLya == True: output['eLya_1216'] = eLya_1216 if Label_NV == True: output['NV_1239'] = NV_1239 if Label_eNV == True: output['eNV_1239'] = eNV_1239 if Label_CIV == True: output['CIV_1549'] = CIV_1549 if Label_eCIV == True: output['eCIV_1549'] = eCIV_1549 if Label_HeII == True: output['HeII_1640'] = HeII_1640 if Label_eHeII == True: output['eHeII_1640'] = eHeII_1640 if Label_OIII_1665 == True: output['OIII_1665'] = OIII_1665 if Label_eOIII_1665 == True: output['eOIII_1665'] = eOIII_1665 if Label_CIII == True: output['CIII_1909'] = CIII_1909 if Label_eCIII == True: output['eCIII_1909'] = eCIII_1909 if Label_Hbeta == True: output['Hb_4861'] = Hb_4861 if Label_eHbeta == True: output['eHb_4861'] = eHb_4861 if Label_OIII_5007 == True: output['OIII_5007'] = OIII_5007 if Label_eOIII_5007 == True: output['eOIII_5007'] = eOIII_5007 ################################################ ###### ESTIMATIONS OF CHEMICAL ABUNDANCES ###### ################################################ #Display for the user print ('Calculating....') print ('') print ('') print ('----------------------------------------------------------------') print ('(%) ID Grid 12+log(O/H) log(C/O) log(U)') print ('----------------------------------------------------------------') # Beginning of loop of calculation count = 0 for tab in range(0,len(input1),1): count = count + 1 OH_mc = [] CO_mc = [] logU_mc = [] OHe_mc = [] COe_mc = [] logUe_mc = [] #Starting Montecarlo for monte in range(0,n,1): OH_p = 0 logU_p = 0 CO_p = 0 den_OH = 0 den_CO = 0 OH_e = 0 CO_e = 0 logU_e = 0 den_OH_e 
= 0 den_CO_e = 0 tol_max = 1e3 #Generating observable values for emission lines Lya_1216_obs = 0 if Lya_1216[tab] <= 0: Lya_1216_obs = 0 else: while Lya_1216_obs <= 0: Lya_1216_obs = np.random.normal(Lya_1216[tab],eLya_1216[tab]+1e-5) NV_1239_obs = 0 if NV_1239[tab]<= 0: NV_1239_obs = 0 else: while NV_1239_obs <= 0: NV_1239_obs = np.random.normal(NV_1239[tab],eNV_1239[tab]+1e-5) CIV_1549_obs = 0 if CIV_1549[tab] <= 0: CIV_1549_obs = 0 else: while CIV_1549_obs <= 0: CIV_1549_obs = np.random.normal(CIV_1549[tab],eCIV_1549[tab]+1e-5) HeII_1640_obs = 0 if HeII_1640[tab] <= 0: HeII_1640_obs = 0 else: if HeII_1640_obs <= 0: HeII_1640_obs = np.random.normal(HeII_1640[tab],eHeII_1640[tab]+1e-5) OIII_1665_obs = 0 if OIII_1665[tab] == 0: OIII_1665_obs = 0 else: while OIII_1665_obs <= 0: OIII_1665_obs = np.random.normal(OIII_1665[tab],eOIII_1665[tab]+1e-5) CIII_1909_obs = 0 if CIII_1909[tab] <= 0: CIII_1909_obs = 0 else: while CIII_1909_obs <= 0: CIII_1909_obs = np.random.normal(CIII_1909[tab],eCIII_1909[tab]+1e-5) Hb_4861_obs = 0 if Hb_4861[tab] <= 0: Hb_4861_obs = 0 else: while Hb_4861_obs <= 0: Hb_4861_obs = np.random.normal(Hb_4861[tab],eHb_4861[tab]+1e-5) OIII_5007_obs = 0 if OIII_5007[tab] <= 0: OIII_5007_obs = 0 else: while OIII_5007_obs <= 0: OIII_5007_obs = np.random.normal(OIII_5007[tab],eOIII_5007[tab]+1e-5) #Observables if OIII_1665_obs <= 0 or OIII_5007_obs <= 0: ROIII_obs = 0 else: ROIII_obs = OIII_5007_obs/OIII_1665_obs if Lya_1216_obs == 0 or NV_1239_obs == 0: N5_obs = 0 else: N5_obs = (NV_1239_obs ) / (Lya_1216_obs) if HeII_1640_obs == 0 or NV_1239_obs == 0: N5He2_obs = 0 else: N5He2_obs = (NV_1239_obs) / (HeII_1640_obs) if Lya_1216_obs <= 0 or CIII_1909_obs <= 0 or CIV_1549_obs <= 0: C34_obs = 0 else: C34_obs = (CIII_1909_obs + CIV_1549_obs) / (Lya_1216_obs) if HeII_1640_obs <= 0 or CIII_1909_obs <= 0 or CIV_1549_obs <= 0: C34He2_obs = 0 else: C34He2_obs = (CIII_1909_obs + CIV_1549_obs) / (HeII_1640_obs) if CIII_1909_obs <= 0 or OIII_1665_obs <= 0 or CIV_1549_obs <= 0: C3O3_obs = -10 else: C3O3_obs = np.log10((CIII_1909_obs) / (OIII_1665_obs)) if CIII_1909_obs <= 0 or CIV_1549_obs <= 0: C3C4_obs = 0 else: C3C4_obs = (CIII_1909_obs/CIV_1549_obs) if CIII_1909_obs <= 0 or Hb_4861_obs <= 0: C34Hb_obs = 0 else: C34Hb_obs = (CIII_1909_obs + CIV_1549_obs) / Hb_4861_obs # Selection of grid if OIII_1665[tab] > 0 and OIII_5007[tab] > 0: grid = grid1 if monte == n-1: grids.append(1) grid_type = 1 elif OIII_1665[tab] > 0 and CIII_1909[tab] > 0: grid = grid2 if monte == n-1: grids.append(2) grid_type = 2 else: grid = grid3 if monte == n-1: grids.append(3) grid_type = 3 ###################### # Calculation of C/O # ###################### if C3O3_obs == -10: CO = -10 else: CHI_ROIII = 0 CHI_C3O3 = 0 CHI_CO = 0 for index in grid: if ROIII_obs == 0: CHI_ROIII = 0 elif index['OIII_1665'] == 0 or index['OIII_5007'] == 0: CHI_ROIII = tol_max else: CHI_ROIII = (index['OIII_5007']/index['OIII_1665'] - ROIII_obs)**2/(index['OIII_5007']/index['OIII_1665']) if C3O3_obs == -10: CHI_C3O3 = 0 elif index['CIII_1909'] == 0 or index['OIII_1665'] == 0: CHI_C3O3 = tol_max else: CHI_C3O3 =(np.log10((index['CIII_1909'])/index['OIII_1665']) - C3O3_obs)**2/np.log10((index['CIII_1909'])/(index['OIII_1665']+1e-5)) CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5 if CHI_CO == 0: CO_p = CO_p den_CO = den_CO else: CO_p = index['logCO'] /(CHI_CO)**2 + CO_p den_CO = 1 / (CHI_CO)**2 + den_CO CO = CO_p / den_CO # Calculation of C/O error if C3O3_obs == -10: eCO = 0 else: CHI_ROIII = 0 CHI_C3O3 = 0 CHI_CO = 0 for index in grid: if 
ROIII_obs == 0: CHI_ROIII = 0 elif index['OIII_1665'] == 0 or index['OIII_5007'] == 0: CHI_ROIII = tol_max else: CHI_ROIII = (index['OIII_5007']/index['OIII_1665'] - ROIII_obs)**2/(index['OIII_5007']/index['OIII_1665']) if C3O3_obs == -10: CHI_C3O3 = 0 elif index['CIII_1909'] == 0 or index['OIII_1665'] == 0: CHI_C3O3 = tol_max else: CHI_C3O3 =(np.log10((index['CIII_1909'])/index['OIII_1665']) - C3O3_obs)**2/np.log10((index['CIII_1909'])/(index['OIII_1665']+1e-5)) CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5 if CHI_CO == 0: CO_e = CO_e den_CO_e = den_CO_e else: CO_e = (index['logCO'] - CO)**2 / (CHI_CO)**2 + CO_e den_CO_e = 1 /(CHI_CO)**2 + den_CO_e eCO = CO_e / den_CO_e ############################### # Calculation of O/H and logU # ############################### if C34_obs == 0 and ROIII_obs == 0 and C34Hb_obs == 0 and C34He2_obs == 0 and N5_obs == 0 and N5He2_obs == 0: OH = 0 logU = 0 else: CHI_ROIII = 0 CHI_C3C4 = 0 CHI_C34He2 = 0 CHI_C34 = 0 CHI_C34Hb = 0 CHI_N5 = 0 CHI_N5He2 = 0 CHI_OH = 0 for index in grid: if CO > -10 and np.abs(index['logCO'] - CO) > np.abs(eCO+0.125): continue if NV_1239_obs > 0 and index['NV_1239'] == 0: continue if CIV_1549_obs > 0 and index['CIV_1549'] == 0: continue if HeII_1640_obs > 0 and index['HeII_1640'] == 0: continue else: if ROIII_obs == 0: CHI_ROIII = 0 elif index['OIII_1665'] == 0 or index['OIII_5007'] == 0: CHI_ROIII = tol_max else: CHI_ROIII = (index['OIII_5007']/index['OIII_1665'] - ROIII_obs)**2/(index['OIII_5007']/index['OIII_1665']) if N5_obs == 0: CHI_N5 = 0 elif index['Lya_1216'] == 0 or index['NV_1239'] == 0: CHI_N5 = tol_max else: CHI_N5 = ((index['NV_1239'])/index['Lya_1216'] - N5_obs)**2/((index['NV_1239'])/index['Lya_1216']) if N5He2_obs == 0: CHI_N5He2 = 0 elif index['HeII_1640'] == 0 or index['NV_1239'] == 0: CHI_N5He2 = tol_max else: CHI_N5He2 = ((index['NV_1239'])/index['HeII_1640'] - N5He2_obs)**2/((index['NV_1239'])/index['HeII_1640']) if C34_obs == 0: CHI_C34 = 0 elif index['Lya_1216'] == 0 or index['CIII_1909'] == 0: CHI_C34 = tol_max else: CHI_C34 = ((index['CIII_1909']+index['CIV_1549'])/index['Lya_1216'] - C34_obs)**2/((index['CIII_1909']+index['CIV_1549'])/index['Lya_1216']) if C34He2_obs == 0: CHI_C34He2 = 0 elif index['HeII_1640'] == 0 or index['CIII_1909'] == 0: CHI_C34He2 = tol_max else: CHI_C34He2 = ((index['CIII_1909']+index['CIV_1549'])/index['HeII_1640'] - C34He2_obs)**2/((index['CIII_1909']+index['CIV_1549'])/index['HeII_1640']) if C34Hb_obs == 0: CHI_C34Hb = 0 elif index['CIII_1909'] == 0: CHI_C34Hb = tol_max else: CHI_C34Hb = (index['CIII_1909']+index['CIV_1549'] - C34Hb_obs)**2/(index['CIII_1909']+index['CIV_1549']) if C3C4_obs == 0: CHI_C3C4 = 0 elif index['CIV_1549'] == 0 or index['CIII_1909'] == 0: CHI_C3C4 = tol_max else: CHI_C3C4 = (index['CIII_1909']/index['CIV_1549'] - C3C4_obs)**2/(index['CIII_1909']/index['CIV_1549']) if C34Hb_obs > 0: CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5 else: CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_N5**2 + CHI_N5He2**2 + CHI_C3C4**2 )**0.5 if CHI_OH == 0: OH_p = OH_p logU_p = logU_p den_OH = den_OH else: OH_p = index['12logOH'] / (CHI_OH)**2 + OH_p logU_p = index['logU'] / (CHI_OH)**2 + logU_p den_OH = 1 /(CHI_OH)**2 + den_OH if OH_p == 0: OH = 0 else: OH = OH_p / den_OH if logU_p == 0: logU = 0 else: logU = logU_p / den_OH #Impossibility for AGN in the estimation if sed == 3 and Lya_1216[tab] == 0 and HeII_1640[tab] == 0 and Hb_4861[tab] == 0: OH = 0 # Calculation of error of O/H and logU if C34_obs == 0 and ROIII_obs == 0 and C34Hb_obs == 0 and 
C34He2_obs == 0 and N5_obs == 0 and N5He2_obs == 0: eOH = 0 elogU = 0 else: CHI_ROIII = 0 CHI_N5 = 0 CHI_N5He2 = 0 CHI_C3C4 = 0 CHI_C34 = 0 CHI_C34He2 = 0 CHI_C34Hb = 0 CHI_OH = 0 for index in grid: if CO > -10 and np.abs(index['logCO'] - CO) > np.abs(eCO+res_CO): continue if NV_1239_obs > 0 and index['NV_1239'] == 0: continue if CIV_1549_obs > 0 and index['CIV_1549'] == 0: continue if HeII_1640_obs > 0 and index['HeII_1640'] == 0: continue else: if ROIII_obs == 0: CHI_ROIII = 0 elif index['OIII_1665'] == 0 or index['OIII_5007'] == 0: CHI_ROIII = tol_max else: CHI_ROIII = (index['OIII_5007']/index['OIII_1665'] - ROIII_obs)**2/(index['OIII_5007']/index['OIII_1665']) if N5_obs == 0: CHI_N5 = 0 elif index['Lya_1216'] == 0 or index['NV_1239'] == 0: CHI_N5 = tol_max else: CHI_N5 = ((index['NV_1239'])/index['Lya_1216'] - N5_obs)**2/((index['NV_1239'])/index['Lya_1216']) if N5He2_obs == 0: CHI_N5He2 = 0 elif index['HeII_1640'] == 0 or index['NV_1239'] == 0: CHI_N5He2 = tol_max else: CHI_N5He2 = ((index['NV_1239'])/index['HeII_1640'] - N5He2_obs)**2/((index['NV_1239'])/index['HeII_1640']) if C34_obs == 0: CHI_C34 = 0 elif index['Lya_1216'] == 0 or index['CIII_1909'] == 0: CHI_C34 = tol_max else: CHI_C34 = ((index['CIII_1909']+index['CIV_1549'])/index['Lya_1216'] - C34_obs)**2/((index['CIII_1909']+index['CIV_1549'])/index['Lya_1216']) if C34He2_obs == 0: CHI_C34He2 = 0 elif index['HeII_1640'] == 0 or index['CIII_1909'] == 0: CHI_C34He2 = tol_max else: CHI_C34He2 = ((index['CIII_1909']+index['CIV_1549'])/index['HeII_1640'] - C34He2_obs)**2/((index['CIII_1909']+index['CIV_1549'])/index['HeII_1640']) if C34Hb_obs == 0: CHI_C34Hb = 0 elif index['CIII_1909'] == 0: CHI_C34Hb = tol_max else: CHI_C34Hb = (index['CIII_1909']+index['CIV_1549'] - C34Hb_obs)**2/(index['CIII_1909']+index['CIV_1549']) if C3C4_obs == 0: CHI_C3C4 = 0 elif index['CIV_1549'] == 0 or index['CIII_1909'] == 0: CHI_C3C4 = tol_max else: CHI_C3C4 = (index['CIII_1909']/index['CIV_1549'] - C3C4_obs)**2/(index['CIII_1909']/index['CIV_1549']) if C34Hb_obs > 0: CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5 else: CHI_OH = (CHI_ROIII**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_N5**2 + CHI_N5He2**2 + CHI_C3C4**2 )**0.5 if CHI_OH == 0: OH_e = OH_e logU_e = logU_e den_OH_e = den_OH_e else: OH_e = (index['12logOH'] - OH)**2 /(CHI_OH)**2 + OH_e logU_e = (index['logU'] - logU)**2 /(CHI_OH)**2 + logU_e den_OH_e = 1 /(CHI_OH)**2 + den_OH_e if OH_e == 0: eOH = 0 else: eOH = OH_e / den_OH_e if logU_e == 0: elogU = 0 else: elogU = logU_e / den_OH_e #Impossiiblity in AGNs to determine O/H without recombination lines if sed == 3 and Lya_1216[tab] == 0 and HeII_1640[tab] == 0 and Hb_4861[tab] == 0: eOH = 0 # Iterations for interpolated models if inter == 0 or (OH == 0 and CO == -10): COf = CO OHf = OH logUf = logU elif inter == 1: if OH == 0: igrid = grid else: igrid = interpolate(grid,2,logU-elogU-0.25,logU+elogU+0.25,10) igrid = igrid[np.lexsort((igrid['logCO'],igrid['logU']))] igrid = interpolate(igrid,0,OH-eOH-0.1,OH+eOH+0.1,10) if CO == -10: igrid = igrid else: igrid = igrid[np.lexsort((igrid['12logOH'],igrid['logU']))] igrid = interpolate(igrid,1,CO-eCO-0.125,CO+eCO+0.125,10) CHI_ROIII = 0 CHI_C3O3 = 0 CHI_C3C4 = 0 CHI_N5 = 0 CHI_N5He2 = 0 CHI_C34He2 = 0 CHI_C34 = 0 CHI_C34Hb = 0 CHI_OH = 0 CHI_CO = 0 for index in igrid: if ROIII_obs == 0: CHI_ROIII = 0 elif index['OIII_1665'] == 0 or index['OIII_5007'] == 0: CHI_ROIII = tol_max else: CHI_ROIII = (index['OIII_5007']/index['OIII_1665'] - ROIII_obs)**2/(index['OIII_5007']/index['OIII_1665']) if 
N5_obs == 0: CHI_N5 = 0 elif index['Lya_1216'] == 0 or index['NV_1239'] == 0: CHI_N5 = tol_max else: CHI_N5 = ((index['NV_1239'])/index['Lya_1216'] - N5_obs)**2/((index['NV_1239'])/index['Lya_1216']) if N5He2_obs == 0: CHI_N5He2 = 0 elif index['HeII_1640'] == 0 or index['NV_1239'] == 0: CHI_N5He2 = tol_max else: CHI_N5He2 = ((index['NV_1239'])/index['HeII_1640'] - N5He2_obs)**2/((index['NV_1239'])/index['HeII_1640']) if C3O3_obs == -10: CHI_C3O3 = 0 elif index['CIII_1909'] == 0 or index['OIII_1665'] == 0: CHI_C3O3 = tol_max else: CHI_C3O3 =(np.log10((index['CIII_1909'])/index['OIII_1665']) - C3O3_obs)**2/np.log10((index['CIII_1909'])/(index['OIII_1665']+1e-5)) if C34_obs == 0: CHI_C34 = 0 elif index['Lya_1216'] == 0: CHI_C34 = tol_max else: CHI_C34 = ((index['CIV_1549']+index['CIII_1909'])/index['Lya_1216'] - C34_obs)**2/((index['CIV_1549']+index['CIII_1909'])/index['Lya_1216']) if C34Hb_obs == 0: CHI_C34Hb = 0 elif index['CIV_1549'] == 0: CHI_C34Hb = tol_max else: CHI_C34Hb = (index['CIV_1549']+index['CIII_1909'] - C34_obs)**2/(index['CIV_1549']+index['CIII_1909']) if C3C4_obs == 0: CHI_C3C4 = 0 elif index['CIII_1909'] == 0 or index['CIV_1549'] == 0: CHI_C3C4 = tol_max else: CHI_C3C4 = (index['CIV_1549']/index['CIII_1909'] - C3C4_obs)**2/(index['CIV_1549']/index['CIII_1909']) if C34Hb_obs > 0: CHI_OH = (CHI_ROIII**2 + CHI_C34Hb**2 + CHI_C3C4**2)**0.5 else: CHI_OH = (CHI_ROIII**2 + CHI_N5**2 + CHI_N5He2**2 + CHI_C34**2 + CHI_C34He2**2 + CHI_C3C4**2 )**0.5 if CHI_OH == 0: OH_p = OH_p logU_p = logU_p den_OH = den_OH else: OH_p = index['12logOH'] /(CHI_OH)**2 + OH_p logU_p = index['logU'] /(CHI_OH)**2 + logU_p den_OH = 1 /(CHI_OH)**2 + den_OH CHI_CO = (CHI_ROIII**2 + CHI_C3O3**2 )**0.5 if CHI_CO == 0: CO_p = CO_p den_CO = den_CO else: CO_p = index['logCO'] /(CHI_CO)**2**2 + CO_p den_CO = 1 /(CHI_CO)**2**2 + den_CO if CO == -10: COf = -10 else: COf = CO_p / den_CO if OH == 0: OHf = 0 logUf = 0 else: OHf = OH_p / den_OH logUf = logU_p / den_OH if OHf > 0: OH_mc.append(OHf) if COf > -10: CO_mc.append(COf) if logUf < 0: logU_mc.append(logUf) if OHf > 0: OHe_mc.append(eOH) if COf > -10: COe_mc.append(eCO) if logUf < 0: logUe_mc.append(elogU) if len(OH_mc) > 0: OHff = np.mean(OH_mc) eOHff = (np.std(OH_mc)**2+np.mean(OHe_mc)**2)**0.5 else: OHff = 0 eOHff = 0 if len(logU_mc) > 0: logUff = np.mean(logU_mc) elogUff = (np.std(logU_mc)**2+np.mean(logUe_mc)**2)**0.5 else: elogUff = 0 logUff = 0 if len(CO_mc) > 0: COff = np.mean(CO_mc) eCOff = (np.std(CO_mc)**2+np.mean(COe_mc)**2)**0.5 else: COff = -10 eCOff = 0 OHffs.append(OHff) eOHffs.append(eOHff) COffs.append(COff) eCOffs.append(eCOff) logUffs.append(logUff) elogUffs.append(elogUff) ################################## # Displaying results in terminal # ################################## if input0.size == 1 and tab==0: continue print (round(100*(count)/float(input1.size),1),'%',Names[tab],grid_type,'', round(OHff,2), round(eOHff,2),'',round(COff,2), round(eCOff,2), '',round(logUff,2), round(elogUff,2)) #################################################### ###### OUTPUT FORMAT AND INFORMATION: RESULTS ###### #################################################### #Grid used and results from the free parameters output['grid'] = grids output['OH'] = OHffs output['eOH'] = eOHffs output['CO'] = COffs output['eCO'] = eCOffs output['logU'] = logUffs output['elogU'] = elogUffs if input0.size == 1: output = np.delete(output,obj=1,axis=0) #Header comments for the file lineas_header = [' HII-CHI-mistry_UV v.5.0 output file', 'Input file:'+input00,'Iterations for 
MonteCarlo: '+str(n),'Used models: '+sed_type,'Library file used : '+file_lib_2, 'Template used to constraint grid of models: '+const_file,''] #Labels for columns (emission lines) line_label = '{:30} '.format(output.dtype.names[0]) for ind2 in range(1, len(output.dtype.names)): line_label += '{:30} '.format(output.dtype.names[ind2]) #Labels for columns lineas_header.append(line_label) header = '\n'.join(lineas_header) #Results np.savetxt('.'.join(input00.split('.')[:-1])+'_hcm-uv-output.dat',output,fmt=' '.join(['%s']*1+['%.3f']*(len(output.dtype.names)-8)+['%i']+['%.2f']*6), header=header) lines_stor = [] with open('.'.join(input00.split('.')[:-1])+'_hcm-uv-output.dat', 'r+') as output_file: for line in output_file: lines_stor.append(line) #Reformating output for better reading of the table file_overwrite = open('.'.join(input00.split('.')[:-1])+'_hcm-uv-output.dat', 'r+') file_overwrite.seek(0) for line_n in lines_stor: if line_n[0] == '#' and line_n[2:4] == 'ID': file_overwrite.write(line_n[2:]) else: file_overwrite.write(line_n) file_overwrite.truncate() file_overwrite.close() print ('-------------------------------------------------') print ('Results are stored in ' + '.'.join(input00.split('.')[:-1]) + '_hcm-uv-output.dat') print ('-------------------------------------------------') ############################################# ###### INFORMATION AND CONTACT DETAILS ###### ############################################# # Enrique Perez-Montero, [email protected] # Borja Perez-Diaz, [email protected] ################# ###### END ###### #################
python
__all__ = [ 'apply', 'applyCSS', 'change', 'changeCSS', 'delete', 'forwarddelete', 'insert', 'queryEnabled', 'queryIndeterm', 'queryState', 'querySupported', 'queryValue', 'selection', 'unapply', 'unapplyCSS' ]
python
# Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from urllib.parse import urlparse from typing import Dict, Type from .base import Client, Server _scheme_to_client_types: Dict[str, Type[Client]] = dict() _scheme_to_server_types: Dict[str, Type[Server]] = dict() def register_client(client_type: Type[Client]): _scheme_to_client_types[client_type.scheme] = client_type return client_type def register_server(server_type: Type[Server]): _scheme_to_server_types[server_type.scheme] = server_type return server_type def _check_scheme(scheme: str, types: Dict): if scheme == '': scheme = None if scheme not in types: # pragma: no cover raise ValueError(f'address illegal, address scheme ' f'should be one of ' f'{", ".join(types)}, ' f'got {scheme}') return scheme def get_client_type(address: str) -> Type[Client]: if '://' not in address: scheme = None else: scheme = urlparse(address).scheme scheme = _check_scheme(scheme, _scheme_to_client_types) return _scheme_to_client_types[scheme] def get_server_type(address: str) -> Type[Server]: if '://' not in address: scheme = None else: scheme = urlparse(address).scheme scheme = _check_scheme(scheme, _scheme_to_server_types) return _scheme_to_server_types[scheme] def gen_internal_address(process_index: int) -> str: return f'unixsocket:///{process_index}'
python
from Application import Application if __name__ == '__main__': app = Application()
python
# determines whether a matrix is orthogonal. A square matrix is orthogonal,
# if its columns and rows are orthogonal unit vectors,
# which is equivalent to: MT M = I
import numpy as np


def check_orthogonal(M):
    # make sure the input is a matrix
    if len(np.shape(M)) != 2:
        print("error: input is not a matrix")
        return
    # make sure the input is a square matrix
    dim = np.shape(M)[0]
    if dim != np.shape(M)[1]:
        print("error: input is not a square matrix")
        return
    A = np.dot(M, M.T)
    # if np.array_equal(A, np.identity(dim)):
    [rows, cols] = A.shape
    I = np.identity(dim)
    for i in range(rows):
        for j in range(cols):
            # compare against the identity within a small tolerance; the
            # difference must be taken in absolute value, otherwise large
            # negative deviations would pass the check
            if not (abs(A[i, j] - I[i, j]) <= 10e-3):
                print("matrix is not orthogonal")
                return
    print("matrix is orthogonal")


if __name__ == '__main__':
    # Verify check_orthogonal function
    D = 1. / 3. * np.array([[2, 2, -1],
                            [2, -1, 2],
                            [-1, 2, 2]])
    check_orthogonal(D)

    # Test 2
    R = np.array([[np.cos(np.pi / 4), -np.sin(np.pi / 4)],
                  [np.sin(np.pi / 4), np.cos(np.pi / 4)]])
    check_orthogonal(R)
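# A more compact equivalent check (a sketch added for illustration, not part of
# the original script): numpy's allclose compares the whole product against the
# identity at once. The tolerance mirrors the 10e-3 threshold used above.
def is_orthogonal(M, atol=10e-3):
    M = np.asarray(M)
    if M.ndim != 2 or M.shape[0] != M.shape[1]:
        return False
    return np.allclose(M @ M.T, np.identity(M.shape[0]), atol=atol)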
python
""" 抽象工厂 代码实例 Abstract Factory Code Demo 家具工厂 """ from __future__ import annotations from abc import ABC, abstractmethod class Chair(ABC): """ product interface 1: Chair """ @abstractmethod def sit_on(self) -> str: pass class Sofa(ABC): """ product interface 2: Sofa """ @abstractmethod def lie_on(self) -> str: pass class ModernChair(Chair): """ product implement Chair: ModernChair """ def sit_on(self) -> str: return 'I sit on a Modern Chair' class ClassicChair(Chair): """ product implement Chair: ClassicChair """ def sit_on(self) -> str: return 'I sit on a Classic Chair' class ModernSofa(Sofa): """ product implement Sofa: ModernSofa """ def lie_on(self) -> str: return 'I sit on a Modern Sofa' class ClassicSofa(Sofa): """ product implement Sofa: ClassicSofa """ def lie_on(self) -> str: return 'I sit on a Classic Sofa' class FurnitureFactory(ABC): """ 一个抽象工厂接口 定义了一系列方法,用来返回不同的抽象产品 The Abstract Factory interface declares a set of methods that return different abstract products. 家具工厂生成沙发和椅子 Furniture Factory produce Chair and SOfa """ @abstractmethod def produce_chair(self) -> Chair: pass @abstractmethod def produce_sofa(self) -> Sofa: pass class ModernFurnitureFactory(FurnitureFactory): """ 一个抽象工厂的实现类 implement FurnitureFactory to produce true product """ def produce_chair(self) -> Chair: print('ModernFurnitureFactory produce chair ...') return ModernChair() def produce_sofa(self) -> Sofa: print('ModernFurnitureFactory produce sofa ...') return ModernSofa() class ClassicFurnitureFactory(FurnitureFactory): """ 一个抽象工厂的实现类 implement FurnitureFactory to produce true product """ def produce_chair(self) -> Chair: print('ClassicFurnitureFactory produce chair ...') return ClassicChair() def produce_sofa(self) -> Sofa: print('ClassicFurnitureFactory produce sofa ...') return ClassicSofa() def client_code(factory: FurnitureFactory): chair = factory.produce_chair() print(chair.sit_on()) sofa = factory.produce_sofa() print(sofa.lie_on()) if __name__ == '__main__': print('\r\n--- I want some Modern Furniture ---\r\n') client_code(ModernFurnitureFactory()) print('\r\n--- I want some Classic Furniture ---\r\n') client_code(ClassicFurnitureFactory())
python
import requests
import time
import datetime
import json
import csv

# def get(t):
#     res_text=requests.get('http://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=0000011,3990012&sty=CTBFTA&st=z&sr=&p=&ps=&cb=&token=70f12f2f4f091e459a279469fe49eca5').text
#     data=eval(res_text)
#     dh=data[0].split(',')
#     ds=data[1].split(',')
#     # extra-large order inflow
#     data_1='%.4f'%((float(dh[7]) + float(ds[7])) / 100000000)
#     data_2='%.4f'%((float(dh[8]) + float(ds[8])) / 100000000)
#     data_3='%.4f'%((float(dh[11]) + float(ds[11])) / 100000000)
#     data_4='%.4f'%((float(dh[12]) + float(ds[12])) / 100000000)
#     data_5='%.4f'%((float(dh[15]) + float(ds[15])) / 100000000)
#     data_6='%.4f'%((float(dh[16]) + float(ds[16])) / 100000000)
#     data_7='%.4f'%((float(dh[19]) + float(ds[19])) / 100000000)
#     data_8='%.4f'%((float(dh[20]) + float(ds[20])) / 100000000)
#     datalist=[str(t)[11:16],data_1,data_2,data_3,data_4,data_5,data_6,data_7,data_8]
#     print(datalist)
#     targetData.append(datalist)

# targetData=[]
# while True:
#     nowTime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
#     if int(str(nowTime)[11:13])>=15 and int(str(nowTime)[14:16])>0:
#         writetoCsv('实时成交'+nowTime[:10]+'.csv',targetData)
#         break
#     elif int(str(nowTime)[11:13])==11 and int(str(nowTime)[14:16])==30:
#         writetoCsv('实时成交'+nowTime[:10]+'.csv',targetData)
#         targetData=[]
#         time.sleep(5340.125)
#     else:
#         try:
#             get(nowTime)
#         except Exception:
#             writetoCsv('实时成交'+nowTime[:10]+'.csv',targetData)
#             targetData=[]
#             print('error, attempting again, please wait')
#             get(nowTime)
#         time.sleep(59.875)

from threading import Timer  # the package that provides `w` (the data API client) still needs to be imported here


def getdata():
    nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    w.start()
    data0 = None  # TODO: the actual data query was left blank in the original ("content to be filled in")
    target = []
    target.append(str(nowTime)[11:19])
    for d in data0.Data:
        target.append(d[0])
    print(target)
    writetoCsv('实时成交' + str(nowTime)[:10] + '.csv', target)
    # reschedule this function in 60 seconds; pass the callable itself
    # instead of calling it, otherwise it would run immediately
    Timer(60, getdata).start()


def writetoCsv(filename, writelist, header=None):
    # append one row; gb18030 encoding is kept as in the original
    with open(filename, 'a+', encoding='gb18030', newline='') as out:
        csv_write = csv.writer(out)
        csv_write.writerow(writelist)


t = Timer(60, getdata)
t.start()
while True:
    nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if int(str(nowTime)[11:13]) >= 15 and int(str(nowTime)[14:16]) > 2:
        t.cancel()
        break
    time.sleep(120)
python
import sys
import socket
from socket import *
from win32clipboard import *
from win32con import *

print "ClipCross Alpha"

host = ""   # Accept connection from any machine.
port = 6000 # We will communicate over port 6000 of this machine. Ports 0-1024 are restricted, ports 1025-65535 are not.
s = ""
con = None  # keep a reference so the finally block can close it safely
try:
    sock = socket() # Create a network socket. By default, it is a TCP socket
    print "Socket successfully created"
    sock.bind((host, port)) # Binds to the port
    print "Socket successfully bound to port %d" % (port)
    sock.listen(1) # We want to listen only to one connection at a time
    print "Socket listening for connections..."
    con, address = sock.accept()
    print "Received connection from %s" % (str(address))
    from Tkinter import *
    import tkMessageBox
    root = Tk()
    root.withdraw()
    query = tkMessageBox.askquestion('Incoming Clipboard Data',
                                     'Do you wish to receive clipboard data from %s?' % (str(address[0])),
                                     icon='warning')
    if query == 'yes':
        s = str(con.recv(65536))
        try:
            OpenClipboard()
            EmptyClipboard()
            SetClipboardData(CF_TEXT, s)
            CloseClipboard()
        except:
            print "Error in accessing clipboard data!!!"
            sys.exit()
        print "Received clipboard data from client"
        con.send("Thank you for connecting. Your data was successfully received.")
    else:
        con.send("The user you were trying to send data to declined your clipboard data.")
except:
    print "Error in networking!"
    sys.exit()
finally:
    if con is not None:
        con.close()
python
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Dict, List, Mapping, Optional, Tuple, Union from .. import _utilities, _tables __all__ = ['Instance'] class Instance(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, alternative_location_id: Optional[pulumi.Input[str]] = None, authorized_network: Optional[pulumi.Input[str]] = None, connect_mode: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, location_id: Optional[pulumi.Input[str]] = None, memory_size_gb: Optional[pulumi.Input[float]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, redis_configs: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, redis_version: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, reserved_ip_range: Optional[pulumi.Input[str]] = None, tier: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ A Google Cloud Redis instance. To get more information about Instance, see: * [API documentation](https://cloud.google.com/memorystore/docs/redis/reference/rest/) * How-to Guides * [Official Documentation](https://cloud.google.com/memorystore/docs/redis/) ## Example Usage :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] alternative_location_id: Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in [locationId]. :param pulumi.Input[str] authorized_network: The full name of the Google Compute Engine network to which the instance is connected. If left unspecified, the default network will be used. :param pulumi.Input[str] connect_mode: The connection mode of the Redis instance. Default value is `DIRECT_PEERING`. Possible values are `DIRECT_PEERING` and `PRIVATE_SERVICE_ACCESS`. :param pulumi.Input[str] display_name: An arbitrary and optional user-provided name for the instance. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Resource labels to represent user provided metadata. :param pulumi.Input[str] location_id: The zone where the instance will be provisioned. If not provided, the service will choose a zone for the instance. For STANDARD_HA tier, instances will be created across two zones for protection against zonal failures. If [alternativeLocationId] is also provided, it must be different from [locationId]. :param pulumi.Input[float] memory_size_gb: Redis memory size in GiB. :param pulumi.Input[str] name: The ID of the instance or a fully qualified identifier for the instance. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] redis_configs: Redis configuration parameters, according to http://redis.io/topics/config. 
Please check Memorystore documentation for the list of supported parameters: https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs :param pulumi.Input[str] redis_version: The version of Redis software. If not provided, latest supported version will be used. Currently, the supported values are: - REDIS_5_0 for Redis 5.0 compatibility - REDIS_4_0 for Redis 4.0 compatibility - REDIS_3_2 for Redis 3.2 compatibility :param pulumi.Input[str] region: The name of the Redis region of the instance. :param pulumi.Input[str] reserved_ip_range: The CIDR range of internal addresses that are reserved for this instance. If not provided, the service will choose an unused /29 block, for example, 10.0.0.0/29 or 192.168.0.0/29. Ranges must be unique and non-overlapping with existing subnets in an authorized network. :param pulumi.Input[str] tier: The service tier of the instance. Must be one of these values: - BASIC: standalone instance - STANDARD_HA: highly available primary/replica instances Default value is `BASIC`. Possible values are `BASIC` and `STANDARD_HA`. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['alternative_location_id'] = alternative_location_id __props__['authorized_network'] = authorized_network __props__['connect_mode'] = connect_mode __props__['display_name'] = display_name __props__['labels'] = labels __props__['location_id'] = location_id if memory_size_gb is None: raise TypeError("Missing required property 'memory_size_gb'") __props__['memory_size_gb'] = memory_size_gb __props__['name'] = name __props__['project'] = project __props__['redis_configs'] = redis_configs __props__['redis_version'] = redis_version __props__['region'] = region __props__['reserved_ip_range'] = reserved_ip_range __props__['tier'] = tier __props__['create_time'] = None __props__['current_location_id'] = None __props__['host'] = None __props__['persistence_iam_identity'] = None __props__['port'] = None super(Instance, __self__).__init__( 'gcp:redis/instance:Instance', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, alternative_location_id: Optional[pulumi.Input[str]] = None, authorized_network: Optional[pulumi.Input[str]] = None, connect_mode: Optional[pulumi.Input[str]] = None, create_time: Optional[pulumi.Input[str]] = None, current_location_id: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, host: Optional[pulumi.Input[str]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, location_id: Optional[pulumi.Input[str]] = None, memory_size_gb: Optional[pulumi.Input[float]] = None, name: Optional[pulumi.Input[str]] = None, persistence_iam_identity: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[float]] = None, project: 
Optional[pulumi.Input[str]] = None, redis_configs: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, redis_version: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, reserved_ip_range: Optional[pulumi.Input[str]] = None, tier: Optional[pulumi.Input[str]] = None) -> 'Instance': """ Get an existing Instance resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] alternative_location_id: Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in [locationId]. :param pulumi.Input[str] authorized_network: The full name of the Google Compute Engine network to which the instance is connected. If left unspecified, the default network will be used. :param pulumi.Input[str] connect_mode: The connection mode of the Redis instance. Default value is `DIRECT_PEERING`. Possible values are `DIRECT_PEERING` and `PRIVATE_SERVICE_ACCESS`. :param pulumi.Input[str] create_time: The time the instance was created in RFC3339 UTC "Zulu" format, accurate to nanoseconds. :param pulumi.Input[str] current_location_id: The current zone where the Redis endpoint is placed. For Basic Tier instances, this will always be the same as the [locationId] provided by the user at creation time. For Standard Tier instances, this can be either [locationId] or [alternativeLocationId] and can change after a failover event. :param pulumi.Input[str] display_name: An arbitrary and optional user-provided name for the instance. :param pulumi.Input[str] host: Hostname or IP address of the exposed Redis endpoint used by clients to connect to the service. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Resource labels to represent user provided metadata. :param pulumi.Input[str] location_id: The zone where the instance will be provisioned. If not provided, the service will choose a zone for the instance. For STANDARD_HA tier, instances will be created across two zones for protection against zonal failures. If [alternativeLocationId] is also provided, it must be different from [locationId]. :param pulumi.Input[float] memory_size_gb: Redis memory size in GiB. :param pulumi.Input[str] name: The ID of the instance or a fully qualified identifier for the instance. :param pulumi.Input[str] persistence_iam_identity: Output only. Cloud IAM identity used by import / export operations to transfer data to/from Cloud Storage. Format is "serviceAccount:". The value may change over time for a given instance so should be checked before each import/export operation. :param pulumi.Input[float] port: The port number of the exposed Redis endpoint. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] redis_configs: Redis configuration parameters, according to http://redis.io/topics/config. Please check Memorystore documentation for the list of supported parameters: https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs :param pulumi.Input[str] redis_version: The version of Redis software. 
If not provided, latest supported version will be used. Currently, the supported values are: - REDIS_5_0 for Redis 5.0 compatibility - REDIS_4_0 for Redis 4.0 compatibility - REDIS_3_2 for Redis 3.2 compatibility :param pulumi.Input[str] region: The name of the Redis region of the instance. :param pulumi.Input[str] reserved_ip_range: The CIDR range of internal addresses that are reserved for this instance. If not provided, the service will choose an unused /29 block, for example, 10.0.0.0/29 or 192.168.0.0/29. Ranges must be unique and non-overlapping with existing subnets in an authorized network. :param pulumi.Input[str] tier: The service tier of the instance. Must be one of these values: - BASIC: standalone instance - STANDARD_HA: highly available primary/replica instances Default value is `BASIC`. Possible values are `BASIC` and `STANDARD_HA`. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["alternative_location_id"] = alternative_location_id __props__["authorized_network"] = authorized_network __props__["connect_mode"] = connect_mode __props__["create_time"] = create_time __props__["current_location_id"] = current_location_id __props__["display_name"] = display_name __props__["host"] = host __props__["labels"] = labels __props__["location_id"] = location_id __props__["memory_size_gb"] = memory_size_gb __props__["name"] = name __props__["persistence_iam_identity"] = persistence_iam_identity __props__["port"] = port __props__["project"] = project __props__["redis_configs"] = redis_configs __props__["redis_version"] = redis_version __props__["region"] = region __props__["reserved_ip_range"] = reserved_ip_range __props__["tier"] = tier return Instance(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="alternativeLocationId") def alternative_location_id(self) -> pulumi.Output[str]: """ Only applicable to STANDARD_HA tier which protects the instance against zonal failures by provisioning it across two zones. If provided, it must be a different zone from the one provided in [locationId]. """ return pulumi.get(self, "alternative_location_id") @property @pulumi.getter(name="authorizedNetwork") def authorized_network(self) -> pulumi.Output[str]: """ The full name of the Google Compute Engine network to which the instance is connected. If left unspecified, the default network will be used. """ return pulumi.get(self, "authorized_network") @property @pulumi.getter(name="connectMode") def connect_mode(self) -> pulumi.Output[Optional[str]]: """ The connection mode of the Redis instance. Default value is `DIRECT_PEERING`. Possible values are `DIRECT_PEERING` and `PRIVATE_SERVICE_ACCESS`. """ return pulumi.get(self, "connect_mode") @property @pulumi.getter(name="createTime") def create_time(self) -> pulumi.Output[str]: """ The time the instance was created in RFC3339 UTC "Zulu" format, accurate to nanoseconds. """ return pulumi.get(self, "create_time") @property @pulumi.getter(name="currentLocationId") def current_location_id(self) -> pulumi.Output[str]: """ The current zone where the Redis endpoint is placed. For Basic Tier instances, this will always be the same as the [locationId] provided by the user at creation time. For Standard Tier instances, this can be either [locationId] or [alternativeLocationId] and can change after a failover event. 
""" return pulumi.get(self, "current_location_id") @property @pulumi.getter(name="displayName") def display_name(self) -> pulumi.Output[Optional[str]]: """ An arbitrary and optional user-provided name for the instance. """ return pulumi.get(self, "display_name") @property @pulumi.getter def host(self) -> pulumi.Output[str]: """ Hostname or IP address of the exposed Redis endpoint used by clients to connect to the service. """ return pulumi.get(self, "host") @property @pulumi.getter def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource labels to represent user provided metadata. """ return pulumi.get(self, "labels") @property @pulumi.getter(name="locationId") def location_id(self) -> pulumi.Output[str]: """ The zone where the instance will be provisioned. If not provided, the service will choose a zone for the instance. For STANDARD_HA tier, instances will be created across two zones for protection against zonal failures. If [alternativeLocationId] is also provided, it must be different from [locationId]. """ return pulumi.get(self, "location_id") @property @pulumi.getter(name="memorySizeGb") def memory_size_gb(self) -> pulumi.Output[float]: """ Redis memory size in GiB. """ return pulumi.get(self, "memory_size_gb") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The ID of the instance or a fully qualified identifier for the instance. """ return pulumi.get(self, "name") @property @pulumi.getter(name="persistenceIamIdentity") def persistence_iam_identity(self) -> pulumi.Output[str]: """ Output only. Cloud IAM identity used by import / export operations to transfer data to/from Cloud Storage. Format is "serviceAccount:". The value may change over time for a given instance so should be checked before each import/export operation. """ return pulumi.get(self, "persistence_iam_identity") @property @pulumi.getter def port(self) -> pulumi.Output[float]: """ The port number of the exposed Redis endpoint. """ return pulumi.get(self, "port") @property @pulumi.getter def project(self) -> pulumi.Output[str]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @property @pulumi.getter(name="redisConfigs") def redis_configs(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Redis configuration parameters, according to http://redis.io/topics/config. Please check Memorystore documentation for the list of supported parameters: https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs """ return pulumi.get(self, "redis_configs") @property @pulumi.getter(name="redisVersion") def redis_version(self) -> pulumi.Output[str]: """ The version of Redis software. If not provided, latest supported version will be used. Currently, the supported values are: - REDIS_5_0 for Redis 5.0 compatibility - REDIS_4_0 for Redis 4.0 compatibility - REDIS_3_2 for Redis 3.2 compatibility """ return pulumi.get(self, "redis_version") @property @pulumi.getter def region(self) -> pulumi.Output[str]: """ The name of the Redis region of the instance. """ return pulumi.get(self, "region") @property @pulumi.getter(name="reservedIpRange") def reserved_ip_range(self) -> pulumi.Output[str]: """ The CIDR range of internal addresses that are reserved for this instance. If not provided, the service will choose an unused /29 block, for example, 10.0.0.0/29 or 192.168.0.0/29. 
Ranges must be unique and non-overlapping with existing subnets in an authorized network. """ return pulumi.get(self, "reserved_ip_range") @property @pulumi.getter def tier(self) -> pulumi.Output[Optional[str]]: """ The service tier of the instance. Must be one of these values: - BASIC: standalone instance - STANDARD_HA: highly available primary/replica instances Default value is `BASIC`. Possible values are `BASIC` and `STANDARD_HA`. """ return pulumi.get(self, "tier") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
python
import logging from pkg_resources import DistributionNotFound, get_distribution from feast.infra.offline_stores.bigquery_source import BigQuerySource from feast.infra.offline_stores.contrib.spark_offline_store.spark_source import ( SparkSource, ) from feast.infra.offline_stores.file_source import FileSource from feast.infra.offline_stores.redshift_source import RedshiftSource from feast.infra.offline_stores.snowflake_source import SnowflakeSource from .data_source import KafkaSource, KinesisSource, SourceType from .entity import Entity from .feature import Feature from .feature_service import FeatureService from .feature_store import FeatureStore from .feature_view import FeatureView from .on_demand_feature_view import OnDemandFeatureView from .repo_config import RepoConfig from .request_feature_view import RequestFeatureView from .value_type import ValueType logging.basicConfig( format="%(asctime)s %(levelname)s:%(message)s", datefmt="%m/%d/%Y %I:%M:%S %p", level=logging.INFO, ) try: __version__ = get_distribution(__name__).version except DistributionNotFound: # package is not installed pass __all__ = [ "Entity", "KafkaSource", "KinesisSource", "Feature", "FeatureService", "FeatureStore", "FeatureView", "OnDemandFeatureView", "RepoConfig", "SourceType", "ValueType", "BigQuerySource", "FileSource", "RedshiftSource", "RequestFeatureView", "SnowflakeSource", "SparkSource", ]
python
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 8 10:41:43 2018

@author: Haosong
"""
import sys

# sys.path entries must be directories, not module files:
# append the folder that contains PrepareData.py
sys.path.append("C:\\CSCI1001_Project")
# Replace the path in the quotes above with the directory PrepareData.py is in

import PrepareData

PrepareData.prepareData()
python
from django.shortcuts import render, get_object_or_404, redirect from rest_framework.views import APIView from rest_framework.response import Response import json from .functions import classify_passenger, load_model class get_classification(APIView): def post(self, request): model = load_model('./api/titanic_model.pk') data = request.data prediction = classify_passenger(model = model, data = data) return(Response(prediction))
python
# -*-coding:utf-8-*- from mobpush.model.BasePush import BasePush class IosNotify(BasePush): serialVersionUID = 6316980682876425791 BADGE_TYPE_SET = 1 BADGE_TYPE_ADD = 2 SLIENT = 1 def __init__(self, title=None, subtitle=None, attachment=None, attachmentType=None, mutableContent=None, contentAvailable=None, slientPush=None, category=None, badgeType=None, badge=None, sound='default'): self.data = { 'title': title, # 标题- 不填写则为应用名称 'subtitle': subtitle, # 副标题 'sound': sound, # APNs通知,通过这个字段指定声音。默认为default,即系统默认声音。 如果设置为空值,则为静音。 # 如果设置为特殊的名称,则需要你的App里配置了该声音才可以正常。 'badge': badge, # 可直接指定 APNs 推送通知的 badge,未设置这个值角标则不带角标推送 'badgeType': badgeType, # badgeAdd=true 时,增加badge对应的角标数,负数时,算减法 # 当这个数值设置了值时,会更新数据库数据 # 未设置这个值角标则不带角标推送 # 1: 绝对值,2: 修改值 'category': category, # 只有IOS8及以上系统才支持此参数推送 'slientPush': slientPush, # 如果只携带content-available: 1,不携带任何badge,sound 和消息内容等参数, # 则可以不打扰用户的情况下进行内容更新等操作即为“Silent Remote Notifications”。 'contentAvailable': contentAvailable, # 将该键设为 1 则表示有新的可用内容。带上这个键值,意味着你的 App 在后台启动了或恢复运行了,application:didReceiveRemoteNotification:fetchCompletionHandler:被调用了。 'mutableContent': mutableContent, # 需要在附加字段中配置相应参数 'attachmentType': attachmentType, # ios富文本0无 ;1 图片 ;2 视频 ;3 音频 'attachment': attachment, } class AndroidNotify(BasePush): def __init__(self, appName=None, title=None, sound=None, warn='12', style=0, content=None): self.data = { 'appName': appName, # 通知标题 'title': title, # 如果不设置,则默认的通知标题为应用的名称。 # max = 20, message = "推送标题最大长度20" 'warn': warn, # warn: 提醒类型: 1提示音;2震动;3指示灯 # 如果多个组合则对应编号组合如:12 标识提示音+震动 'style': style, # 显示样式标识 0、默认通知无; 1、长内容则为内容数据; 2、大图则为图片地址; 3、横幅则为多行内容 # values = {0, 1, 2, 3}, message = "安卓消息格式参数错误" 'content': content, # content: style样式具体内容 'sound': sound, # 自定义声音 } class CustomNotify(BasePush): def __init__(self, customType=None, customTitle=None): self.data = { 'customType': customType, # 自定义消息类型:text 文本消息 'customTitle': customTitle # 自定义类型标题 } class PushNotify(BasePush): def __init__(self, taskCron=0, taskTime=None, plats=[1, 2], iosProduction=1, offlineSeconds=3600, content=None, title=None, type=1, customNotify=None, androidNotify=None, iosNotify=None, url=None, extrasMapList=[]): self.data = { 'taskCron': taskCron, # 是否是定时任务:0否,1是,默认0 'taskTime': taskTime, # 定时消息 发送时间 'speed': 0, # 定速推送, 设置平均每秒推送速度 # 0: 不限制 # 其他限制速度 # 例如: 每秒1条,每秒100条, 建议最小设置为100条 # 这个只是模糊的控制, 只保证推送整体上的平均数值, 比如设置为1, 每5秒推送一条 'plats': plats, # 可使用平台,1 android;2 ios ;3 winphone(暂不使用) ; 'iosProduction': iosProduction, # plat = 2下,0测试环境,1生产环境,默认1 'offlineSeconds': offlineSeconds, # 离线时间,秒 'content': content, # 推送内容 'title': title, # 推送标题 'type': type, # 推送类型:1通知;2自定义 # values = {1, 2}, message = "消息类型1:通知,2:自定义" 'customNotify': customNotify, # 自定义内容, type=2 'androidNotify': androidNotify, # android通知消息, type=1, android 'iosNotify': iosNotify, # ios通知消息, type=1, ios 'url': url, # 打开链接 'extrasMapList': extrasMapList, # 附加字段键值对的方式 }
python
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer from itsdangerous import URLSafeSerializer as URLSafeSerializer def generate_auth_token(secret_key, username, password): serializer = Serializer(secret_key, expires_in=15000) token = serializer.dumps({'username': username, "password": password}) return token.decode() def deserialize_auth_token(secret_key, token): serializer = Serializer(secret_key) return serializer.loads(token) def generate_res_token(secret_key, body): serializer = URLSafeSerializer(secret_key) token = serializer.dumps(body) return token def deserialize_res_token(secret_key, token): serializer = URLSafeSerializer(secret_key) return serializer.loads(token)
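# Usage sketch for the helpers above (added for illustration; the secret key and
# credentials are made-up placeholders). Note that TimedJSONWebSignatureSerializer
# only exists in older itsdangerous releases (it was removed in the 2.x series).
if __name__ == '__main__':
    SECRET = 'change-me'

    token = generate_auth_token(SECRET, 'alice', 's3cret')
    print(deserialize_auth_token(SECRET, token))     # {'username': 'alice', 'password': 's3cret'}

    res_token = generate_res_token(SECRET, {'status': 'ok'})
    print(deserialize_res_token(SECRET, res_token))  # {'status': 'ok'}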
python
import mlflow
from threading import Thread
import os
import time

from sapsan.core.models import ExperimentBackend


class MLflowBackend(ExperimentBackend):
    def __init__(self, name: str = 'experiment',
                 host: str = 'localhost',
                 port: int = 9000):
        super().__init__(name)
        self.host = host
        self.port = port

        self.mlflow_url = "http://{host}:{port}".format(host=host, port=port)
        mlflow.set_tracking_uri(self.mlflow_url)

        try:
            self.experiment_id = mlflow.set_experiment(name)
            print("mlflow ui is already running at %s:%s" % (self.host, self.port))
        except Exception:
            print("starting mlflow ui, please wait ...")
            self.start_ui()
            self.experiment_id = mlflow.set_experiment(name)
            print("mlflow ui is running at %s:%s" % (self.host, self.port))

    def start_ui(self):
        # launch the mlflow ui command in a background thread; pass the callable
        # and its argument instead of calling os.system() directly, otherwise the
        # command would run (and block) here and the thread would have no target
        mlflow_thread = Thread(target=os.system,
                               args=("mlflow ui --host %s --port %s &" % (self.host, self.port),))
        mlflow_thread.start()
        time.sleep(5)

    def start(self, run_name: str, nested=False):
        mlflow.start_run(run_name=run_name, nested=nested)

    def log_metric(self, name: str, value: float):
        mlflow.log_metric(name, value)

    def log_parameter(self, name: str, value: str):
        mlflow.log_param(name, value)

    def log_artifact(self, path: str):
        mlflow.log_artifact(path)

    def close_active_run(self):
        if mlflow.active_run() is not None:
            mlflow.end_run()

    def end(self):
        mlflow.end_run()
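# Minimal usage sketch for MLflowBackend (added for illustration; the experiment
# name, run name, and logged values below are made-up examples, not part of sapsan):
if __name__ == '__main__':
    backend = MLflowBackend(name='demo-experiment', host='localhost', port=9000)
    backend.start(run_name='first-run')
    backend.log_parameter('batch_size', '32')
    backend.log_metric('loss', 0.42)
    backend.end()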
python
#!/usr/bin/env python3 # Copyright 2019 HTCondor Team, Computer Sciences Department, # University of Wisconsin-Madison, WI. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from pathlib import Path import cloudpickle def main(uid, input_file): func_path = Path.cwd() / f'{uid}.func' with func_path.open(mode = 'rb') as f: func = cloudpickle.load(f) input_file_path = Path.cwd() / Path(input_file).name output_file_path = Path.cwd() / f'{uid}.output' func(input_file_path, output_file_path) if __name__ == '__main__': main(uid = sys.argv[1], input_file = sys.argv[2])
python
from core.buckets import BucketExtend from core.sampler import Sampler class LowDiscrepancySampler(Sampler): def __init__(self, bucket_extend: BucketExtend, samples_count: int, shutterOpen: float, shutterClose: float): super().__init__(bucket_extend, samples_count, shutterOpen, shutterClose) self.samples_count = samples_count self.pos_x = self.bucket_extend.start_x self.pos_y = self.bucket_extend.start_y self.image_samples = [float, float] * samples_count self.lens_samples = [float, float] * samples_count self.time_samples = [float] * samples_count self.sample_pos = 0
python
#!/usr/bin/env python # coding: utf-8 # In[2]: from pathlib import Path import numpy as np import pandas as pd train = pd.read_csv("corpus/imdb/labeledTrainData.tsv", header=0, delimiter="\t", quoting=3) test = pd.read_csv("corpus/imdb/testData.tsv", header=0, delimiter="\t", quoting=3) train_texts = train["review"].tolist() train_labels = train["sentiment"].tolist() test_texts = test["review"].tolist() from sklearn.model_selection import train_test_split train_texts, val_texts, train_labels, val_labels = train_test_split(train_texts, train_labels, test_size=.2) from transformers import DistilBertTokenizerFast tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased') train_encodings = tokenizer(train_texts, truncation=True, padding=True) val_encodings = tokenizer(val_texts, truncation=True, padding=True) test_encodings = tokenizer(test_texts, truncation=True, padding=True) import tensorflow as tf train_dataset = tf.data.Dataset.from_tensor_slices(( dict(train_encodings), train_labels )) val_dataset = tf.data.Dataset.from_tensor_slices(( dict(val_encodings), val_labels )) # test_labels = [1]*len(test1) test_dataset = tf.data.Dataset.from_tensor_slices(( dict(test_encodings) )) from transformers import TFDistilBertForSequenceClassification model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased') # In[3]: optimizer = tf.keras.optimizers.Adam(learning_rate=5e-5) model.compile(optimizer=optimizer, loss=model.compute_loss) # can also use any keras loss fn # In[4]: history = model.fit(train_dataset.batch(5), epochs=5) # In[5]: model.evaluate(val_dataset.batch(5)) # In[6]: labels_pred = model.predict(test_dataset.batch(5)) # In[9]: from matplotlib import pyplot as plt plt.plot(history.history['acc']) plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() # 绘制训练 & 验证的损失值 plt.plot(history.history['loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() # In[10]: y = labels_pred.logits y_pred = np.argmax(y,axis = 1) # In[15]: y # In[12]: y_pred # In[13]: result_output = pd.DataFrame(data={"id": test["id"], "sentiment": y_pred}) result_output.to_csv("bert.csv", index=False, quoting=3) # In[14]: model.save("TFDistilBertForSequenceClassification")
python
# Available debug categories. DEBUG_CATEGORIES = { 'architects': False, 'callbacks': False, 'controllers': False, 'drivers': False, 'emitters': False, 'imap': False, 'managers': False, 'workers': False, 'all': False, } # Default categories for the 'all' keyword. DEBUG_ALL_CATEGORIES = [ 'callbacks', 'controllers', 'drivers', 'emitters', 'imap', 'managers', 'workers', ] ARC = 'architects' CLB = 'callbacks' CTL = 'controllers' DRV = 'drivers' EMT = 'emitters' MGR = 'managers' WRK = 'workers' IMAP = 'imap' # Time to sleep for a response of another worker. This value is used by the edmp # module where appropriate. This allows not eating too much CPU. #TODO: expose to the rascal. SLEEP = 0.02
python
class Solution:
    def sortColors(self, nums):
        # list.sort() sorts in place and returns None, so there is nothing useful to return
        nums.sort()


if __name__ == '__main__':
    nums = [0, 1, 2, 2, 1, 1, 2, 2, 0, 0, 0, 0, 2, 1]
    print("Before Sort: ")
    print(nums)
    # [0, 1, 2, 2, 1, 1, 2, 2, 0, 0, 0, 0, 2, 1]

    Solution().sortColors(nums)
    print("After Sort: ")
    print(nums)
    # [0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]
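# An alternative one-pass, constant-space approach to the same problem is the
# Dutch national flag partition; this sketch is added for illustration and is
# not part of the original solution.
class SolutionOnePass:
    def sortColors(self, nums):
        low, mid, high = 0, 0, len(nums) - 1
        while mid <= high:
            if nums[mid] == 0:
                nums[low], nums[mid] = nums[mid], nums[low]
                low += 1
                mid += 1
            elif nums[mid] == 1:
                mid += 1
            else:  # nums[mid] == 2
                nums[mid], nums[high] = nums[high], nums[mid]
                high -= 1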
python
#!/usr/bin/env python3 """ Check Lisp examples in a Markdown file. To run (assuming this repo is a submodule in a dir called .cl-make): $ pip3 install -r .cl-make/requirements.txt $ .cl-make/readme.py README.md The return code is zero iff all Lisp examples in the file run without errors in an SBCL REPL and their outputs match the given outputs. Such output can be specified in a language-less code block immediately following the Lisp code block. The whole REPL session is printed to stdout. If the REPL session exits unexpectedly, or any evaluation takes longer than 30 seconds, or an error occurs, or the output doesn't match, then a descriptive error message is printed to stderr and an exit code of 1 is returned. A standalone Lisp file is created to reproduce the environment for the failing Lisp form, and all this reproduction information is included in the error message. This script uses pytest internally, and thus can also return other exit codes: https://docs.pytest.org/en/6.0.1/usage.html#possible-exit-codes """ import argparse import difflib import logging import os import pathlib import sys import tempfile import marko.block as block from marko.ext.gfm import gfm import pexpect import pytest def pairwise(things): """ Return a list of pairs of adjacent elements from things. The last element of this list is the pair (things[-1], None). >>> list(pairwise(['a', 'b', 'c'])) [('a', 'b'), ('b', 'c'), ('c', None)] >>> list(pairwise([])) [] """ return zip(things, things[1:] + [None]) def is_code_block(element): """ Return truthy iff the Marko element is a code block. >>> is_code_block(gfm.parse(''' foo''').children[0]) True >>> is_code_block(gfm.parse('''``` ... bar ... ```''').children[0]) True >>> is_code_block(gfm.parse('''> baz''').children[0]) False """ types = [block.CodeBlock, block.FencedCode] return any(isinstance(element, t) for t in types) def code_block_to_dict(code_block): r""" Return a dict of the lang and text of the Marko code block. >>> code_block_to_dict(gfm.parse('''```lisp ... (+ 2 ... 2) ... ```''').children[0]) {'lang': 'lisp', 'text': '(+ 2\n 2)\n'} >>> code_block_to_dict(gfm.parse(''' foo''').children[0]) {'lang': '', 'text': 'foo\n'} """ return { 'lang': code_block.lang, # should only have one child but just in case; also, children of # the child is just a string holding the text 'text': ''.join(child.children for child in code_block.children), } def slurp(filename): """ Return the contents of filename as a string. >>> 'public domain' in slurp('LICENSE.txt') True """ with open(filename) as file: return file.read() def lisp_examples(element): r""" Return a list of all Lisp examples in the Marko element. A Lisp example is a code block whose language is 'lisp', and is returned as a dictionary whose key 'code' holds the text of that code block. If the Lisp code block is immediately followed by another code block whose language is the empty string, then the text of that second block is also included in the dictionary, under the key 'output'. 
>>> from pprint import pprint >>> examples = lisp_examples(gfm.parse(slurp('test/example.md'))) >>> pprint(examples, width=68) [{'code': '(format t "Hello, world 1!")\n', 'output': 'Hello, world 1!\nNIL\n'}, {'code': '(format t "Hello, world 4!")\n', 'output': 'Hello, world 4!\nNIL\n'}, {'code': '(format nil "Hello, world 5!")\n'}] """ examples = [] if hasattr(element, 'children'): children = element.children # sometimes the children are just a string holding the text if isinstance(children, list): # don't let blank lines get in the middle of an example pared = [x for x in children if not isinstance(x, block.BlankLine)] for a, b in pairwise(pared): if is_code_block(a): code = code_block_to_dict(a) if code['lang'] == 'lisp': example = {'code': code['text']} if is_code_block(b): output = code_block_to_dict(b) if not output['lang']: example['output'] = output['text'] examples.append(example) else: # will safely skip when a has no grandchildren examples.extend(lisp_examples(a)) return examples def quicklisp(): """ Return the path to the Quicklisp directory. """ # Quicklisp sets this variable on installation if 'QUICK_LISP' in os.environ: return os.environ['QUICK_LISP'] else: # but it doesn't show up in a Docker image without using ENV, so # in particular SEL doesn't have $QUICK_LISP at time of writing return f'{os.environ["HOME"]}/quicklisp' # regex matching the default SBCL prompt, only at the start of a line prompt = r'(?<![^\n])\* ' # possibilities when we eval patterns = [prompt, pexpect.EOF, pexpect.TIMEOUT] class ExitException(Exception): pass class TimeoutException(Exception): pass class MismatchException(Exception): def __init__(self, actual): self.actual = actual class ReadmeItem(pytest.Item): def __init__(self, name, parent, code, output): super().__init__(name, parent) self.code = code self.output = output def runtest(self): code = self.code repl.send(code) index = repl.expect(patterns) # Pexpect returns CR/LF actual = repl.before.replace('\r\n', '\n') # print nicely as if input/output were in actual REPL session logging.info('* ' + '\n '.join(code.splitlines()) + f'\n{actual}') if index == patterns.index(pexpect.EOF): raise ExitException() elif index == patterns.index(pexpect.TIMEOUT): # the error is (?) shown in the log to stdout raise TimeoutException() else: expected = self.output if expected and expected != actual: # the actual output is (?) 
shown in the log to stdout raise MismatchException(actual) else: # track all the forms we successfully evaluate up until # the first error (if any) forms.append(code) def reportinfo(self): return self.fspath, 0, f'[readme] Lisp example #{self.name}' def repr_failure(self, excinfo): tmp = tempfile.NamedTemporaryFile( mode='w', suffix='.lisp', prefix=f'{pathlib.Path(self.parent.fspath).stem}_', delete=False, ) repro = tmp.name tmp.write('\n'.join(forms)) tmp.close() if isinstance(excinfo.value, ExitException): reason = 'Exited REPL unexpectedly.\n' if isinstance(excinfo.value, TimeoutException): # the error is shown in the log to stdout reason = 'Timeout: either took too long or an error occurred.\n' if isinstance(excinfo.value, MismatchException): diff = list(difflib.ndiff( self.output.splitlines(keepends=True), excinfo.value.actual.splitlines(keepends=True), )) # the full actual output is shown in the log to stdout reason = ' '.join( ['Differences (ndiff with -expected +actual):\n\n'] + diff ) return '\n'.join([ reason, 'To reproduce this in a REPL, first evaluate all the forms up to', 'but not including this one by running the following command:', '', f' sbcl --load {repro}', '', 'Then evaluate the erroneous form:', '', ] + [f' {line}' for line in self.code.splitlines()]) class ReadmeFile(pytest.File): def collect(self): examples = lisp_examples(gfm.parse(slurp(self.fspath))) for index, example in enumerate(examples): yield ReadmeItem.from_parent( self, name=str(index+1), code=example['code'], # mandatory output=example.get('output'), # might not be present ) class ReadmePlugin: def pytest_collect_file(self, parent, path): # we don't check the path because our pytest invocation # specifies only one file, and we assume user gave us Markdown return ReadmeFile.from_parent(parent, fspath=path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--timeout', type=float, help='seconds allowed for each REPL form', ) parser.add_argument('file', help='a Markdown file name') cli_args = parser.parse_args() # aggregate all the forms that we evaluate successfully, so that if # an error occurs, the user can easily reproduce it forms = [] # Quicklisp isn't present by default in a raw SBCL in the Docker # image, but it is installed already so we just need to load it args = ['--load', f'{quicklisp()}/setup.lisp'] repl = pexpect.spawn( 'sbcl', args, echo=False, # otherwise we have to strip input from repl.before encoding='utf-8', # otherwise repl.before gives binary strings timeout=cli_args.timeout, ) # nothing should go wrong before we eval anything repl.expect(prompt) exit_code = pytest.main( ['--exitfirst', # the REPL can get messed up if error or exit '--log-cli-level=INFO', # print every input and output '--log-format=%(message)s', '--show-capture=no', # don't reprint input/output on failure '--', # don't choke on filenames starting with dashes cli_args.file], plugins=[ReadmePlugin()] ) sys.exit(exit_code)
python
#!/usr/bin/python import serial import time import sys if len(sys.argv) != 2: print "Usage: %s <serial port>" % sys.argv[0] sys.exit() def getResponse(): time.sleep(0.25) s = ser.readline() print "RECV: " print s if "NMI:" in s: print "NMI signal received" #sys.exit() s = ser.readline() print "RECV: " print s if "IRQ:" in s: print "IRQ signal received" s = ser.readline() print "RECV: " print s ser = serial.Serial(sys.argv[1], 115200, timeout=5) getResponse() # initial ready message for i in range(99): ser.write(b"WD000%02X\n" % i) getResponse() ser.write(b"WD00100\n") getResponse() ser.write(b"WD001FF\n") getResponse() ser.write(b"WD0003F\n") getResponse() ser.write(b"WD00100\n") getResponse() ser.write(b"WD001FF\n") getResponse() ser.close()
python
import cfscrape from flask import request from flask_restplus import Resource, Namespace, fields, abort from Servers.AnimeFLV.scraper import getList, scrapeEpisodeList, scrapeEpisode, scrapeGenre, scrapeGenreList, scrapeFeed, scrapeLastAnimeAdded cfscraper = cfscrape.create_scraper(delay=10) animeflv_api = Namespace('AnimeFLV', description='AnimeFLV API') search_model = animeflv_api.model('Search AnimeFLV', { 'value': fields.String, 'page': fields.Integer }) episodes_list_model = animeflv_api.model('Episodes List AnimeFLV', { 'last_id': fields.Integer, 'slug': fields.String, 'page': fields.Integer }) watch_episode_model = animeflv_api.model('Watch Episode AnimeFLV', { 'id_episode': fields.Integer, 'slug': fields.String, 'no_episode': fields.Integer }) genre_model = animeflv_api.model('Genre search AnimeFLV', { 'type': fields.String, 'page': fields.Integer }) @animeflv_api.route('/') class Home(Resource): @animeflv_api.doc(description='Index endpoint', responses={200: 'Server is OK'}) def get(self): return {'server': 'AnimeFLV'} @animeflv_api.route('/search') class Search(Resource): @animeflv_api.expect(search_model) @animeflv_api.doc(description='Search for an anime in AnimeFLV', responses={ 200: 'Request was successful', 400: 'Bad request', 500: 'Internal server error' }, params={ 'value': 'String to search in AnimeFLV', 'page': 'Current page' }) def post(self): params = request.get_json() anime_name = params['value'].lower() page = params['page'] if not anime_name or not page: abort(400, 'Bad request') try: anime_list = getList() directory = [anime for anime in anime_list if anime_name in anime['title'].lower()] page-=1 length = len(directory) start_range = page * 24 end_range = start_range + 24 if start_range + 24 < length else length filtered_anime = [directory[i] for i in range(start_range, end_range)] return filtered_anime except: abort(500, 'Something ocurred while searching the anime') @animeflv_api.route('/episodes') class Episodes(Resource): @animeflv_api.expect(episodes_list_model) @animeflv_api.doc(description='Search an anime episodes list', responses={ 200: 'Request was successful', 400: 'Bad request', 500: 'Internal server error' }, params={ 'last_id': 'Anime last Id', 'slug': 'Anime name used in AnimeFLV endpoint', 'page': 'Current page' }) def post(self): params = request.get_json() last_id = params['last_id'] slug = params['slug'] page = params['page'] if not slug or not last_id or not page: abort(400, 'Bad request') try: episodes = scrapeEpisodeList(last_id, slug) page-=1 length = len(episodes) start_range = page * 24 end_range = start_range + 24 if start_range + 24 < length else length results = [episodes[i] for i in range(start_range, end_range)] return results except: abort(500, 'Something ocurred while retrieving the episodes list') @animeflv_api.route('/watch') class Watch(Resource): @animeflv_api.expect(watch_episode_model) @animeflv_api.doc(description='Get episode streaming options', responses={ 200: 'Request was successful', 400: 'Bad request', 500: 'Internal server error' }, params={ 'id_episode': 'Episode id', 'slug': 'Anime name used in AnimeFLV endpoint', 'no_episode': 'Eposide number' }) def post(self): params = request.get_json() id_episode = params['id_episode'] slug = params['slug'] no_episode = params['no_episode'] if not id_episode or not slug or not no_episode: abort(400, 'Bad request') try: return scrapeEpisode(id_episode, slug, no_episode) except: abort(500, 'Something ocurred while retrieving streaming options') @animeflv_api.route('/genre') class 
Genre(Resource): @animeflv_api.expect(genre_model) @animeflv_api.doc(description='Get animes related with specific genre', responses={ 200: 'Request was successful', 400: 'Bad request', 500: 'Internal server error' }, params={ 'type': 'Genre type', 'page': 'Current page' }) def post(self): params = request.get_json() genre_type = params['type'] page = params['page'] if not genre_type or not page: abort(400, 'Bad request') try: return scrapeGenre(genre_type, page) except: abort(500, 'Something ocurred while retrieving animes') @animeflv_api.route('/genre/list') class GenreList(Resource): @animeflv_api.doc(description='Get genre list', responses={ 200: 'Request was successful', 400: 'Bad request', 500: 'Internal server error' }) def get(self): try: return scrapeGenreList() except: abort(500, 'Something ocurred while retrieving genre list') @animeflv_api.route('/feed') class Feed(Resource): @animeflv_api.doc(description='Get today feed', responses={ 200: 'Request was successful', 400: 'Bad request', 500: 'Internal server error' }) def get(self): try: return scrapeFeed() except: abort(500, 'Something ocurred while retrieving today feed') @animeflv_api.route('/last') class LastAnimeAdded(Resource): @animeflv_api.doc(description='Get last anime added', responses={ 200: 'Request was successful', 400: 'Bad request', 500: 'Internal server error' }) def get(self): try: return scrapeLastAnimeAdded() except: abort(500, 'Something ocurred while retrieving last anime added')
python
from threading import Lock

from twisted.internet import protocol, reactor


class ClientProtocol(protocol.Protocol):
    def dataReceived(self, data):
        self.server_protocol.transport.write(data)

    def connectionLost(self, reason):
        self.server_protocol.transport.loseConnection()


class ClientFactory(protocol.ClientFactory):
    def __init__(self, server_protocol):
        self.server_protocol = server_protocol

    def buildProtocol(self, addr):
        client_protocol = ClientProtocol()
        client_protocol.server_protocol = self.server_protocol
        self.server_protocol.client_protocol = client_protocol
        return client_protocol


class ServerProtocol(protocol.Protocol):
    def __init__(self, dst_ip, dst_port):
        self.dst_ip = dst_ip
        self.dst_port = dst_port
        self.client_protocol = None
        self.buffer = []

    def connectionMade(self):
        reactor.connectTCP(self.dst_ip, self.dst_port, ClientFactory(self))

    def dataReceived(self, data):
        self.buffer.append(data)
        self.sendData()

    def sendData(self):
        if not self.client_protocol:
            reactor.callLater(0.5, self.sendData)
            return

        for packet in self.buffer:
            self.client_protocol.transport.write(packet)
        self.buffer = []

    def connectionLost(self, reason):
        if self.client_protocol:
            self.client_protocol.transport.loseConnection()


class ServerFactory(protocol.Factory):
    def __init__(self, dst_ip, dst_port):
        self.dst_ip = dst_ip
        self.dst_port = dst_port

    def buildProtocol(self, addr):
        return ServerProtocol(self.dst_ip, self.dst_port)


class NATService:
    """
    This service provides a NAT-like service when the backend pool is located in a remote machine.
    Guests are bound to a local IP (e.g., 192.168.150.0/24), and so not accessible from a remote Cowrie.
    This class provides TCP proxies that associate accessible IPs in the backend pool's machine to the
    internal IPs used by guests, like a NAT.
    """
    def __init__(self):
        self.bindings = {}
        self.lock = Lock()  # we need to be thread-safe just in case, this is accessed from multiple clients

    def request_binding(self, guest_id, dst_ip, ssh_port, telnet_port):
        self.lock.acquire()
        try:
            # see if binding is already created; bindings are keyed by guest_id,
            # so the lookup has to use guest_id (not dst_ip) to ever hit
            if guest_id in self.bindings:
                # increase connected
                self.bindings[guest_id][0] += 1
                return self.bindings[guest_id][1]._realPortNumber, self.bindings[guest_id][2]._realPortNumber
            else:
                nat_ssh = reactor.listenTCP(0, ServerFactory(dst_ip, ssh_port), interface='0.0.0.0')
                nat_telnet = reactor.listenTCP(0, ServerFactory(dst_ip, telnet_port), interface='0.0.0.0')
                self.bindings[guest_id] = [0, nat_ssh, nat_telnet]
                return nat_ssh._realPortNumber, nat_telnet._realPortNumber
        finally:
            self.lock.release()

    def free_binding(self, guest_id):
        self.lock.acquire()
        try:
            self.bindings[guest_id][0] -= 1

            # stop listening if no-one connected
            if self.bindings[guest_id][0] == 0:
                self.bindings[guest_id][1].stopListening()
                self.bindings[guest_id][2].stopListening()
        finally:
            self.lock.release()
python
from pathlib import Path from collections import defaultdict import sys TEST_MODE = bool(len(sys.argv) > 1 and sys.argv[1] == "test") CARD = ['E', 'S', 'W', 'N'] DIRECTIONS = [(1,0),(0,1),(-1,0),(0,-1)] ROTATIONS = [(1,0,0,1),(0,-1,1,0),(-1,0,0,-1),(0,1,-1,0)] def phase1(data): pos = [0,0] facing = 0 for l, val in data: if l in CARD: pos[0] += DIRECTIONS[CARD.index(l)][0] * val pos[1] += DIRECTIONS[CARD.index(l)][1] * val elif l == 'F': pos[0] += DIRECTIONS[facing][0] * val pos[1] += DIRECTIONS[facing][1] * val elif l == 'L': facing = (facing - val//90) % 4 elif l == 'R': facing = (facing + val//90) % 4 return abs(pos[0])+abs(pos[1]) def phase2(data): pos = [0,0] wp = [10,-1] for l, val in data: if l in CARD: wp[0] += DIRECTIONS[CARD.index(l)][0] * val wp[1] += DIRECTIONS[CARD.index(l)][1] * val elif l == 'F': pos[0] += wp[0] * val pos[1] += wp[1] * val else: direction = 1 if l == 'R' else -1 matrix = ROTATIONS[direction*val//90] wp = [wp[0]*matrix[0]+wp[1]*matrix[1],wp[0]*matrix[2]+wp[1]*matrix[3]] return abs(pos[0])+abs(pos[1]) if __name__ == "__main__": with Path(__file__).parent.joinpath("input/day12_sample" if TEST_MODE else "input/day12").open() as f: INSTRUCTIONS = [(line[0], int(line[1:].strip())) for line in f] print(f'Phase 1: {phase1(INSTRUCTIONS)}') print(f'Phase 2: {phase2(INSTRUCTIONS)}')
python
import pytest

from rotkehlchen.tests.utils.ethereum import ETHEREUM_TEST_PARAMETERS


@pytest.mark.parametrize(*ETHEREUM_TEST_PARAMETERS)
def test_get_block_by_number(ethereum_manager):
    block = ethereum_manager.get_block_by_number(10304885)
    assert block['timestamp'] == 1592686213
    assert block['number'] == 10304885
    assert block['hash'] == '0xe2217ba1639c6ca2183f40b0f800185b3901faece2462854b3162d4c5077752c'


@pytest.mark.parametrize(*ETHEREUM_TEST_PARAMETERS)
def test_get_transaction_receipt(ethereum_manager):
    result = ethereum_manager.get_transaction_receipt(
        '0x12d474b6cbba04fd1a14e55ef45b1eb175985612244631b4b70450c888962a89',
    )
    block_hash = '0x6f3a7838a8788c3371b88df170c3643d19bad896c915a7368681292882b6ad61'
    assert result['blockHash'] == block_hash
    assert len(result['logs']) == 2
    assert result['gasUsed'] == '0x232ae'
python
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.

import os
import random

import numpy as np
import torch


def set_random_seed(seed):
    """Seed Python, NumPy and PyTorch (CPU and all CUDA devices) RNGs for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
python
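A short usage sketch for the seeding helper above. Note that fully deterministic GPU runs may additionally require cuDNN settings, which the helper does not touch; those lines are shown only as a hedged suggestion.

# Hypothetical usage: seed everything before building models and dataloaders.
set_random_seed(42)

# Optional extra determinism knobs (not part of the helper above):
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False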
# Copyright 2006-2012 Mark Diekhans import re from pycbio.hgdata.autoSql import intArraySplit, intArrayJoin, strArraySplit, strArrayJoin from pycbio.tsv.tabFile import TabFileReader from pycbio.hgdata import dnaOps from pycbio.hgdata.cigar import ExonerateCigar from collections import defaultdict from deprecation import deprecated # FIXME: drop sequence support, it is almost never used # Notes: # - terms plus and minus are used because `positive' is long and `pos' abbreviation is # often used for position. def reverseCoords(start, end, size): return (size - end, size - start) def reverseStrand(s): "return reverse of a strand character" return "+" if (s == "-") else "-" def dropQueryUniq(qName): """if a suffix in the form -[0-9]+(.[0-9]+)? is append to make the name unique, drop it""" return re.match('^(.+?)(-[0-9]+(.[0-9]+)*)?$', qName).group(1) class PslBlock(object): """Block of a PSL""" __slots__ = ("psl", "iBlk", "qStart", "tStart", "size", "qSeq", "tSeq") def __init__(self, qStart, tStart, size, qSeq=None, tSeq=None): "sets iBlk base on being added in ascending order" self.psl = None self.iBlk = None self.qStart = qStart self.tStart = tStart self.size = size self.qSeq = qSeq self.tSeq = tSeq def __len__(self): return self.size def __str__(self): return "{}..{} <=> {}..{}".format(self.qStart, self.qEnd, self.tStart, self.tEnd) @property def qEnd(self): return self.qStart + self.size @property def tEnd(self): return self.tStart + self.size @property def qStartPlus(self): "get qStart for the block on positive strand" if self.psl.qStrand == '+': return self.qStart else: return self.psl.qSize - self.qEnd @property def qEndPlus(self): "get qEnd for the block on positive strand" if self.psl.qStrand == '+': return self.qEnd else: return self.psl.qSize - self.qStart @property def tStartPlus(self): "get tStart for the block on positive strand" if self.psl.tStrand == '+': return self.tStart else: return self.psl.tSize - self.tEnd @property def tEndPlus(self): "get tEnd for the block on positive strand" if self.psl.tStrand == '+': return self.tEnd else: return self.psl.tSize - self.tStart @deprecated() def getQStartPos(self): return self.qStartPlus @deprecated() def getQEndPos(self): return self.qEndPlus @deprecated() def getTStartPos(self): return self.tStartPlus @deprecated() def getTEndPos(self): return self.tEndPlus def sameAlign(self, other): "compare for equality of alignment." 
return (other is not None) and (self.qStart == other.qStart) and (self.tStart == other.tStart) and (self.size == other.size) and (self.qSeq == other.qSeq) and (self.tSeq == other.tSeq) def reverseComplement(self, newPsl): "construct a block that is the reverse complement of this block" return PslBlock(self.psl.qSize - self.qEnd, self.psl.tSize - self.tEnd, self.size, (dnaOps.reverseComplement(self.qSeq) if (self.qSeq is not None) else None), (dnaOps.reverseComplement(self.tSeq) if (self.tSeq is not None) else None)) def swapSides(self, newPsl): "construct a block with query and target swapped " return PslBlock(self.tStart, self.qStart, self.size, self.tSeq, self.qSeq) def swapSidesReverseComplement(self, newPsl): "construct a block with query and target swapped and reverse complemented " return PslBlock(self.psl.tSize - self.tEnd, self.psl.qSize - self.qEnd, self.size, (dnaOps.reverseComplement(self.tSeq) if (self.tSeq is not None) else None), (dnaOps.reverseComplement(self.qSeq) if (self.qSeq is not None) else None)) class Psl(object): """Object containing data from a PSL record.""" __slots__ = ("match", "misMatch", "repMatch", "nCount", "qNumInsert", "qBaseInsert", "tNumInsert", "tBaseInsert", "strand", "qName", "qSize", "qStart", "qEnd", "tName", "tSize", "tStart", "tEnd", "blocks") @classmethod def _parseBlocks(cls, psl, blockCount, blockSizesStr, qStartsStr, tStartsStr, qSeqsStr, tSeqsStr): "convert parallel arrays to PslBlock objects" blockSizes = intArraySplit(blockSizesStr) qStarts = intArraySplit(qStartsStr) tStarts = intArraySplit(tStartsStr) haveSeqs = (qSeqsStr is not None) if haveSeqs: qSeqs = strArraySplit(qSeqsStr) tSeqs = strArraySplit(tSeqsStr) for i in range(blockCount): psl.addBlock(PslBlock(qStarts[i], tStarts[i], blockSizes[i], (qSeqs[i] if haveSeqs else None), (tSeqs[i] if haveSeqs else None))) def __init__(self, qName=None, qSize=0, qStart=0, qEnd=0, tName=None, tSize=0, tStart=0, tEnd=0, strand=None): "create a new PSL with no blocks" self.match = 0 self.misMatch = 0 self.repMatch = 0 self.nCount = 0 self.qNumInsert = 0 self.qBaseInsert = 0 self.tNumInsert = 0 self.tBaseInsert = 0 self.strand = strand self.qName = qName self.qSize = qSize self.qStart = qStart self.qEnd = qEnd self.tName = tName self.tSize = tSize self.tStart = tStart self.tEnd = tEnd self.blocks = [] @classmethod def fromRow(cls, row): """"Create PSL from a text row of columns, usually split from a tab file line""" psl = Psl(qName=row[9], qSize=int(row[10]), qStart=int(row[11]), qEnd=int(row[12]), tName=row[13], tSize=int(row[14]), tStart=int(row[15]), tEnd=int(row[16]), strand=row[8]) psl.match = int(row[0]) psl.misMatch = int(row[1]) psl.repMatch = int(row[2]) psl.nCount = int(row[3]) psl.qNumInsert = int(row[4]) psl.qBaseInsert = int(row[5]) psl.tNumInsert = int(row[6]) psl.tBaseInsert = int(row[7]) blockCount = int(row[17]) haveSeqs = len(row) > 21 cls._parseBlocks(psl, blockCount, row[18], row[19], row[20], (row[21] if haveSeqs else None), (row[22] if haveSeqs else None)) return psl @classmethod def fromDbRow(cls, row, dbColIdxMap): """"Create PSL from a database row""" # FIXME: change to use DictCursor psl = Psl(qName=row[dbColIdxMap["qName"]], qSize=row[dbColIdxMap["qSize"]], qStart=row[dbColIdxMap["qStart"]], qEnd=row[dbColIdxMap["qEnd"]], tName=row[dbColIdxMap["tName"]], tSize=row[dbColIdxMap["tSize"]], tStart=row[dbColIdxMap["tStart"]], tEnd=row[dbColIdxMap["tEnd"]], strand=row[dbColIdxMap["strand"]],) psl.match = row[dbColIdxMap["matches"]] psl.misMatch = row[dbColIdxMap["misMatches"]] 
psl.repMatch = row[dbColIdxMap["repMatches"]] psl.nCount = row[dbColIdxMap["nCount"]] psl.qNumInsert = row[dbColIdxMap["qNumInsert"]] psl.qBaseInsert = row[dbColIdxMap["qBaseInsert"]] psl.tNumInsert = row[dbColIdxMap["tNumInsert"]] psl.tBaseInsert = row[dbColIdxMap["tBaseInsert"]] blockCount = row[dbColIdxMap["blockCount"]] haveSeqs = "qSeqs" in dbColIdxMap cls._parseBlocks(psl, blockCount, row[dbColIdxMap["blockSizes"]], row[dbColIdxMap["qStarts"]], row[dbColIdxMap["tStarts"]], (row[dbColIdxMap["qSeqs"]] if haveSeqs else None), (row[dbColIdxMap["tSeqs"]] if haveSeqs else None)) return psl @classmethod def create(cls, qName=None, qSize=0, qStart=0, qEnd=0, tName=None, tSize=0, tStart=0, tEnd=0, strand=None): "create a new PSL" psl = Psl(qName=qName, qSize=qSize, qStart=qStart, qEnd=qEnd, tName=tName, tSize=tSize, tStart=tStart, tEnd=tEnd, strand=strand) return psl def addBlock(self, blk): blk.psl = self blk.iBlk = len(self.blocks) self.blocks.append(blk) @property def blockCount(self): return len(self.blocks) @property def qStrand(self): return self.strand[0] @property def tStrand(self): return (self.strand[1] if len(self.strand) > 1 else "+") @deprecated() def getQStrand(self): return self.qStrand @deprecated() def getTStrand(self): return self.tStrand @deprecated() def qRevRange(self, start, end): "reverse a query range to the other strand (dropping, this is dumb)" return (self.qSize - end, self.qSize - start) @deprecated() def tRevRange(self, start, end): "reverse a query range to the other strand (dropping, this is dumb)" return (self.tSize - end, self.tSize - start) @deprecated() def qRangeToPos(self, start, end): "convert a query range in alignment coordinates to positive strand coordinates" if self.qStrand == "+": return (start, end) else: return (self.qSize - end, self.qSize - start) @deprecated() def tRangeToPos(self, start, end): "convert a target range in alignment coordinates to positive strand coordinates" if self.tStrand == "+": return (start, end) else: return (self.tSize - end, self.tSize - start) def isProtein(self): lastBlock = self.blockCount - 1 if len(self.strand) < 2: return False return (((self.strand[1] == '+') and (self.tEnd == self.tStarts[lastBlock] + 3 * self.blockSizes[lastBlock])) or ((self.strand[1] == '-') and (self.tStart == (self.tSize - (self.tStarts[lastBlock] + 3 * self.blockSizes[lastBlock]))))) @property def tLength(self): return self.tEnd - self.tStart @property def qLength(self): return self.qEnd - self.qStart def tOverlap(self, tName, tStart, tEnd): "test for overlap of target range" return (tName == self.tName) and (tStart < self.tEnd) and (tEnd > self.tStart) def tBlkOverlap(self, tStart, tEnd, iBlk): "does the specified block overlap the target range" return (tStart < self.getTEndPos(iBlk)) and (tEnd > self.getTStartPos(iBlk)) def toRow(self): "convert PSL to array of strings" row = [str(self.match), str(self.misMatch), str(self.repMatch), str(self.nCount), str(self.qNumInsert), str(self.qBaseInsert), str(self.tNumInsert), str(self.tBaseInsert), self.strand, self.qName, str(self.qSize), str(self.qStart), str(self.qEnd), self.tName, str(self.tSize), str(self.tStart), str(self.tEnd), str(self.blockCount), intArrayJoin([b.size for b in self.blocks]), intArrayJoin([b.qStart for b in self.blocks]), intArrayJoin([b.tStart for b in self.blocks])] if self.blocks[0].qSeq is not None: row.append(strArrayJoin([b.qSeq for b in self.blocks])) row.append(strArrayJoin([b.tSeq for b in self.blocks])) return row def __str__(self): "return psl as a tab-separated 
string" return "\t".join(self.toRow()) def write(self, fh): """write psl to a tab-seperated file""" fh.write(str(self)) fh.write('\n') @staticmethod def queryKey(psl): "sort key using query address" return (psl.qName, psl.qStart, psl.qEnd) @staticmethod def targetKey(psl): "sort key using target address" return (psl.tName, psl.tStart, psl.tEnd) def __eq__(self, other): "compare for equality of alignment" if ((not isinstance(other, self.__class__)) or (self.match != other.match) or (self.misMatch != other.misMatch) or (self.repMatch != other.repMatch) or (self.nCount != other.nCount) or (self.qNumInsert != other.qNumInsert) or (self.qBaseInsert != other.qBaseInsert) or (self.tNumInsert != other.tNumInsert) or (self.tBaseInsert != other.tBaseInsert) or (self.strand != other.strand) or (self.qName != other.qName) or (self.qSize != other.qSize) or (self.qStart != other.qStart) or (self.qEnd != other.qEnd) or (self.tName != other.tName) or (self.tSize != other.tSize) or (self.tStart != other.tStart) or (self.tEnd != other.tEnd) or (self.blockCount != other.blockCount)): return False for i in range(self.blockCount): if not self.blocks[i].sameAlign(other.blocks[i]): return False return True def __ne__(self, other): return not self.__eq__(other) def sameAlign(self, other): "compare for equality of alignment. The stats fields are not compared." if ((other is None) or (self.strand != other.strand) or (self.qName != other.qName) or (self.qSize != other.qSize) or (self.qStart != other.qStart) or (self.qEnd != other.qEnd) or (self.tName != other.tName) or (self.tSize != other.tSize) or (self.tStart != other.tStart) or (self.tEnd != other.tEnd) or (self.blockCount != other.blockCount)): return False for i in range(self.blockCount): if not self.blocks[i].sameAlign(other.blocks[i]): return False return True def __hash__(self): return hash(self.tName) + hash(self.tStart) def identity(self): # FIXME: make property aligned = float(self.match + self.misMatch + self.repMatch) if aligned == 0.0: return 0.0 # just matches Ns else: return (self.match + self.repMatch) / aligned def basesAligned(self): # FIXME: make property return self.match + self.misMatch + self.repMatch def queryAligned(self): # FIXME: make property return (self.match + self.misMatch + self.repMatch) / self.qSize def reverseComplement(self): "create a new PSL that is reverse complemented" rc = Psl(qName=self.qName, qSize=self.qSize, qStart=self.qStart, qEnd=self.qEnd, tName=self.tName, tSize=self.tSize, tStart=self.tStart, tEnd=self.tEnd, strand=reverseStrand(self.qStrand) + reverseStrand(self.tStrand)) rc.match = self.match rc.misMatch = self.misMatch rc.repMatch = self.repMatch rc.nCount = self.nCount rc.qNumInsert = self.qNumInsert rc.qBaseInsert = self.qBaseInsert rc.tNumInsert = self.tNumInsert rc.tBaseInsert = self.tBaseInsert for i in range(self.blockCount - 1, -1, -1): rc.addBlock(self.blocks[i].reverseComplement(rc)) return rc def _swapStrand(self, keepTStrandImplicit, doRc): # don't make implicit if already explicit if keepTStrandImplicit and (len(self.strand) == 1): qs = reverseStrand(self.tStrand) if doRc else self.tStrand ts = "" else: # swap and make|keep explicit qs = self.tStrand ts = self.qStrand return qs + ts def swapSides(self, keepTStrandImplicit=False): """Create a new PSL with target and query swapped, If keepTStrandImplicit is True the psl has an implicit positive target strand, reverse complement to keep the target strand positive and implicit. 
If keepTStrandImplicit is False, don't reverse complement untranslated alignments to keep target positive strand. This will make the target strand explicit.""" doRc = (keepTStrandImplicit and (len(self.strand) == 1) and (self.qStrand == "-")) swap = Psl(qName=self.tName, qSize=self.tSize, qStart=self.tStart, qEnd=self.tEnd, tName=self.qName, tSize=self.qSize, tStart=self.qStart, tEnd=self.qEnd, strand=self._swapStrand(keepTStrandImplicit, doRc)) swap.match = self.match swap.misMatch = self.misMatch swap.repMatch = self.repMatch swap.nCount = self.nCount swap.qNumInsert = self.tNumInsert swap.qBaseInsert = self.tBaseInsert swap.tNumInsert = self.qNumInsert swap.tBaseInsert = self.qBaseInsert if doRc: for i in range(self.blockCount - 1, -1, -1): swap.addBlock(self.blocks[i].swapSidesReverseComplement(swap)) else: for i in range(self.blockCount): swap.addBlock(self.blocks[i].swapSides(swap)) return swap class PslReader(object): """Generator to read PSLs from a tab file or file-like object""" def __init__(self, fspec): self.fspec = fspec def __iter__(self): for psl in TabFileReader(self.fspec, rowClass=Psl.fromRow, hashAreComments=True, skipBlankLines=True): yield psl class PslTbl(list): """Table of PSL objects loaded from a tab-file """ def __init__(self, fileName, qNameIdx=False, tNameIdx=False, qUniqDrop=False): for psl in PslReader(fileName): self.append(psl) self.qNameMap = self.tNameMap = None if qNameIdx: self._mkQNameIdx(qUniqDrop) if tNameIdx: self._mkTNameIdx() def _mkQNameIdx(self, qUniqDrop): self.qNameMap = defaultdict(list) for psl in self: n = dropQueryUniq(psl.qName) if qUniqDrop else psl.qName self.qNameMap[n].append(psl) def _mkTNameIdx(self): self.tNameMap = defaultdict(list) for psl in self: self.tNameMap[psl.tName](psl) self.tNameMap.default_factory = None def getQNames(self): return list(self.qNameMap.keys()) def haveQName(self, qName): return (self.qNameMap.get(qName) is not None) def genByQName(self, qName): """generator to get PSL for a give qName""" ent = self.qNameMap.get(qName) if ent is not None: for psl in ent: yield psl def getByQName(self, qName): """get list of PSLs for a give qName""" return list(self.genByQName(qName)) def getTNames(self): return list(self.tNameMap.keys()) def haveTName(self, tName): return (self.tNameMap.get(tName) is not None) def genByTName(self, tName): """generator to get PSL for a give tName""" ent = self.tNameMap.get(tName) if ent is not None: for psl in ent: yield psl def getByTName(self, tName): """get a list PSL for a give tName""" return list(self.genByTName(tName)) def pslFromExonerateCigar(qName, qSize, qStart, qEnd, qStrand, tName, tSize, tStart, tEnd, tStrand, cigarStr): "create a PSL from an Ensembl-style cigar formatted alignment" def processMatch(psl, size, qNext, tNext): psl.addBlock(PslBlock(qNext, tNext, size)) psl.match += size return (qNext + size, tNext + size) def processInsert(psl, size, tNext): psl.tNumInsert += 1 psl.tBaseInsert += size return tNext + size def processDelete(psl, size, qNext): psl.qNumInsert += 1 psl.qBaseInsert += size return qNext + size cigar = ExonerateCigar(cigarStr) psl = Psl.create(qName=qName, qSize=qSize, qStart=qStart, qEnd=qEnd, tName=tName, tSize=tSize, tStart=tStart, tEnd=tEnd, strand=qStrand + tStrand) qNext = qStart qBlkEnd = qEnd if qStrand == '-': qNext, qBlkEnd = reverseCoords(qNext, qBlkEnd, qSize) tNext = tStart tBlkEnd = tEnd if tStrand == '-': tNext, tBlkEnd = reverseCoords(tNext, tBlkEnd, tSize) for op in cigar: if op.aligned: qNext, tNext = processMatch(psl, op.count, qNext, 
tNext) elif op.tinsert: tNext = processInsert(psl, op.count, tNext) elif op.tdelete: qNext = processDelete(psl, op.count, qNext) else: raise Exception("invalid CIGAR op {} in {}".format(op, cigar)) if qNext != qBlkEnd: raise Exception("CIGAR length does not match aligned query range: {} {}".format(qName, cigar)) if tNext != tBlkEnd: raise Exception("CIGAR length does not match aligned target range: {} {}".format(qName, cigar)) if psl.tStrand == '-': psl = psl.reverseComplement() psl.strand = psl.strand[0] # BLAT convention return psl
python
from pydantic import BaseSettings


class Settings(BaseSettings):
    MONGO_URI: str = "mongodb://localhost:27017/"
    APP_DB: str = "ultraapp"
    JWT_SECRET: str = "S3CR3T"  # jwt secret
    JWT_LIFETIME: int = 3600 * 24


settings = Settings()
python
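Because the Settings class above inherits pydantic's BaseSettings, each field can be overridden through environment variables of the same name or through constructor keyword arguments; the values below are placeholders for illustration.

# Environment-variable override (read automatically when Settings() is created):
#   export MONGO_URI="mongodb://db.example.internal:27017/"
#   export JWT_LIFETIME=3600

# Explicit override when constructing a separate instance:
dev_settings = Settings(MONGO_URI="mongodb://localhost:27017/", APP_DB="ultraapp_dev")
print(dev_settings.APP_DB, dev_settings.JWT_LIFETIME)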
import time
from typing import List


class Solution:
    def evalRPN(self, tokens: List[str]) -> int:
        stack = []
        for token in tokens:
            if token in {'+', '-', '*', '/'}:
                y = stack.pop()
                x = stack.pop()
                if token == '+':
                    stack.append(x + y)
                elif token == '-':
                    stack.append(x - y)
                elif token == '*':
                    stack.append(x * y)
                elif token == '/':
                    stack.append(int(x / y))  # truncate towards zero
            else:
                stack.append(int(token))
        return stack.pop()


if __name__ == "__main__":
    testCases = [
        (["2", "1", "+", "3", "*"], 9),
        (["4", "13", "5", "/", "+"], 6),
        (["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"], 22),
    ]
    for i, testCase in enumerate(testCases):
        tokens, ans = testCase
        tic = time.time()
        ret = Solution().evalRPN(tokens)
        toc = time.time()
        print(f"{i}: {ret == ans}, return {ret} in {toc-tic:.3f}s.")
python
""" Create an OpenVINO model package to upload to Azure Blob Storage and use IoT Hub module update twin to update the Azure Percept AzureEyeModule. """ import argparse import os import json import zipfile import datetime from azure.storage.blob import ( BlockBlobService, BlobPermissions, ) from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import Twin, TwinProperties def create_openvino_image_classification_model_config(model_filepath, label_filename='labels.txt'): """ Create the AzureEyeModule config.json file for an image classification model. Returns the config filepath. """ # Create the config.json file config = { "DomainType": "classification", "LabelFileName": label_filename, "ModelFileName": os.path.basename(model_filepath) # model filepath is the .xml openvino model file } # write the config.json file in the model directory config_filepath = os.path.join(os.path.dirname(model_filepath), "config.json") with open(config_filepath, "w") as f: json.dump(config, f) return config_filepath def zip_openvino_image_classification_model_package(config_filepath): """ Zip the model directory for uploading to IoT Hub. Return the zip filepath. """ # read the config json with open(config_filepath, "r") as f: config = json.load(f) # create the zip file from config.json, the label file, and the model xml and bin files config_dirname = os.path.dirname(os.path.abspath(config_filepath)) model_no_ext = os.path.splitext(config["ModelFileName"])[0] model_bin_filename = f"{model_no_ext}.bin" # get the model .bin filename from the .xml file name # create the zip filepath from the model name zip_filepath = os.path.join(os.path.dirname(config_filepath), f"{model_no_ext}.zip") with zipfile.ZipFile(zip_filepath, "w") as zf: zf.write(config_filepath, arcname="config.json") zf.write(os.path.join(config_dirname, config["LabelFileName"]), arcname=config["LabelFileName"]) zf.write(os.path.join(config_dirname, config["ModelFileName"]), arcname=config["ModelFileName"]) zf.write(os.path.join(config_dirname, model_bin_filename), arcname=os.path.basename(model_bin_filename)) return zip_filepath def upload_model_zip(model_zip_filepath, model_container_name, storage_account_name, storage_account_key): """ Upload the OpenVINO model package to Azure Blob Storage and return the download URL. """ # create a BlockBlobService object with Azure storage account name and key block_blob_service = BlockBlobService(account_name=storage_account_name, account_key=storage_account_key) # create a container for the model block_blob_service.create_container(model_container_name, fail_on_exist=False) # upload the model package to the container model_blob_name = os.path.basename(model_zip_filepath) block_blob_service.create_blob_from_path( container_name=model_container_name, blob_name=model_blob_name, file_path=model_zip_filepath, ) # get the model download URL model_download_url = block_blob_service.make_blob_url( model_container_name, model_blob_name, protocol='https', sas_token=block_blob_service.generate_blob_shared_access_signature( container_name=model_container_name, blob_name=model_blob_name, permission=BlobPermissions.READ, expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=1) ) ) return model_download_url def update_percept_module_twin(model_download_url, connection_string, device_id, module_id='azureeyemodule'): """ Update the Azure IoT Hub module twin to use the new model download URL, which will cause the Percept kit to download and run the new model. 
connection_string, device_id come from IoT Hub: # Go to https://portal.azure.com # Select your IoT Hub # Click on Shared access policies # Click 'service' policy on the right (or another policy having 'service connect' permission) # Copy Connection string--primary key """ iothub_registry_manager = IoTHubRegistryManager(connection_string) module_twin = iothub_registry_manager.get_module_twin(device_id, module_id) print (f"Module twin properties before update:\n{module_twin.properties}") # Update twin twin_patch = Twin() twin_patch.properties = TwinProperties(desired={"ModelZipUrl": model_download_url}) updated_module_twin = iothub_registry_manager.update_module_twin(device_id, module_id, twin_patch, module_twin.etag) print (f"Module twin properties after update:\n{updated_module_twin.properties}") if __name__ == '__main__': # Create a command line parser with the model filepath, Azure Storage account name, key, and model container name options parser = argparse.ArgumentParser() parser.add_argument("--model", "-m", required=True, help="Path to the OpenVINO model .xml file") parser.add_argument('--storage-account-name', type=str, required=True, help='Azure Storage account name') parser.add_argument('--storage-account-key', type=str, required=True, help='Azure Storage account key') parser.add_argument('--storage-container-name', type=str, required=True, help='Azure Storage model container name') parser.add_argument('--iothub-connection-string', type=str, required=True, help='IoT Hub connection string') parser.add_argument('--device-id', type=str, required=True, help='IoT Hub Percept device id') # Parse the command line arguments args = parser.parse_args() # Create the OpenVINO model package config_filepath = create_openvino_image_classification_model_config(args.model) # Zip the model package zip_filepath = zip_openvino_image_classification_model_package(config_filepath) # Upload the model package to Azure Storage model_download_url = upload_model_zip(zip_filepath, args.storage_container_name, args.storage_account_name, args.storage_account_key) # Update the Azure IoT Hub module twin to use the new model package version update_percept_module_twin(model_download_url, args.iothub_connection_string, args.device_id)
python
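A hedged sketch of how the helper functions in the Azure Percept packaging script above might be called directly from Python instead of through the argparse entry point; every value shown (paths, container, account name, key, connection string, device id) is a placeholder.

# Hypothetical direct use of the helpers above (all values are placeholders).
config_path = create_openvino_image_classification_model_config("models/flowers.xml")
zip_path = zip_openvino_image_classification_model_package(config_path)
url = upload_model_zip(zip_path, "models", "mystorageaccount", "<storage-account-key>")
update_percept_module_twin(url, "<iothub-connection-string>", "my-percept-device")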
EPSILON = 0

UNICODE_LATIN_START = 32
UNICODE_LATIN_END = 127

SEEK_RULE = 1
SEEK_RULE_NAME = 2
SEEK_ST_COLON = 3
SEEK_ND_COLON = 4
SEEK_EQUALS = 5
SEEK_ST_PROD = 6
SEEK_ST_TERM = 7
SEEK_ST_NTERM = 8
SEEK_ST_ESC = 9
SEEK_PROD = 10
SEEK_TERM = 11
SEEK_NTERM = 12
SEEK_ESC = 13
SEEK_SPECIAL_TERM = 14
SEEK_SPECIAL_NTERM = 15
SEEK_SPECIAL_DONE = 16

EXPECTED_LT = -1
EMPTY_RULENAME = -2
LT_FOBIDDEN = -3
EXPECTED_COLON = -4
EXPECTED_EQUALS = -5
EMPY_PRODUCTION = -6
INVALID_TOKEN = -7
INVALID_ESCAPE = -8
DUPLICATED_RULE = -9
INVALID_REGULAR = -10
PLUS_BEFORE = -11
python
''' Script to do analysis ''' import argparse import logging import time import torch import transformers import itertools from collections import defaultdict from models import MTModel # Use with care: logging error only while printing analysis for reading sanity transformers.utils.logging.set_verbosity_error() def output_diff(alignment, translation): pass def get_out_token(src_idx, s2t, output): #get 1-best ali out_idx = list(s2t[src_idx])[0] #get token from idx tmp = output.split() out_token = tmp[out_idx] return out_token # Align source and target word sequences with the awesome aligner (expects non-tokenized input) def align(src, tgt): model = transformers.BertModel.from_pretrained('bert-base-multilingual-cased') tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-multilingual-cased') # pre-processing sent_src, sent_tgt = src.strip().split(), tgt.strip().split() token_src, token_tgt = [tokenizer.tokenize(word) for word in sent_src], [tokenizer.tokenize(word) for word in sent_tgt] wid_src, wid_tgt = [tokenizer.convert_tokens_to_ids(x) for x in token_src], [tokenizer.convert_tokens_to_ids(x) for x in token_tgt] ids_src, ids_tgt = tokenizer.prepare_for_model(list(itertools.chain(*wid_src)), return_tensors='pt', model_max_length=tokenizer.model_max_length, truncation=True)['input_ids'], tokenizer.prepare_for_model(list(itertools.chain(*wid_tgt)), return_tensors='pt', truncation=True, model_max_length=tokenizer.model_max_length)['input_ids'] sub2word_map_src = [] for i, word_list in enumerate(token_src): sub2word_map_src += [i for x in word_list] sub2word_map_tgt = [] for i, word_list in enumerate(token_tgt): sub2word_map_tgt += [i for x in word_list] # alignment align_layer = 8 threshold = 1e-3 model.eval() with torch.no_grad(): out_src = model(ids_src.unsqueeze(0), output_hidden_states=True)[2][align_layer][0, 1:-1] out_tgt = model(ids_tgt.unsqueeze(0), output_hidden_states=True)[2][align_layer][0, 1:-1] dot_prod = torch.matmul(out_src, out_tgt.transpose(-1, -2)) softmax_srctgt = torch.nn.Softmax(dim=-1)(dot_prod) softmax_tgtsrc = torch.nn.Softmax(dim=-2)(dot_prod) softmax_inter = (softmax_srctgt > threshold)*(softmax_tgtsrc > threshold) # src2tgt is a dict mapping src words to their set of aligned tgt words; align_words is the set of alignments for printing alis etc align_subwords = torch.nonzero(softmax_inter, as_tuple=False) align_words = set() src2tgt = defaultdict(set) for i, j in align_subwords: align_words.add( (sub2word_map_src[i], sub2word_map_tgt[j]) ) src2tgt[sub2word_map_src[i]].add(sub2word_map_tgt[j]) return src2tgt, align_words def print_alignments(align_words): for i, j in sorted(align_words): print(f'{color.BOLD}{color.BLUE}{sent_src[i]}{color.END}==={color.BOLD}{color.RED}{sent_tgt[j]}{color.END}') return # printing class color: PURPLE = '\033[95m' CYAN = '\033[96m' DARKCYAN = '\033[36m' BLUE = '\033[94m' GREEN = '\033[92m' YELLOW = '\033[93m' RED = '\033[91m' BOLD = '\033[1m' UNDERLINE = '\033[4m' END = '\033[0m' #example: python analysis.py --lang_pair en-es --src "this is a test" --swap_idx 3 --swap_val sentence if __name__=="__main__": parser = argparse.ArgumentParser() parser.add_argument('--lang_pair') parser.add_argument('--src') parser.add_argument('--swap_idx', action='store', type=int) parser.add_argument('--swap_val') args = parser.parse_args() logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(message)s') # -- swap analysis -- start = time.time() #instantiate model model = MTModel(args.lang_pair) src_idx = args.swap_idx #standard 
setting src = args.src out = model.translation_from_string(src) s2t, _ = align(src,out) #noised source src_word = src.split()[ src_idx ] src_swap = args.swap_val src_cos = model.compute_cos(model.get_embed_from_text(src_word), model.get_embed_from_text(src_swap)) print("cossim between src (%s) and sub (%s) is: %f." % (src_word, src_swap, src_cos)) #do swap tmp = src.split() tmp[src_idx] = src_swap swap_src = ' '.join(tmp) swap_out = model.translation_from_string(swap_src) swap_s2t, _ = align(swap_src,swap_out) #noised output out_word = get_out_token(src_idx, s2t, out) out_swap = get_out_token(src_idx, swap_s2t, swap_out) out_cos = model.compute_cos(model.get_embed_from_text(out_word), model.get_embed_from_text(out_swap)) print("cossim between output (%s) and sub (%s) is: %f." % (out_word, out_swap, out_cos)) print(out) print(swap_out) end = time.time() logging.info(f'Time to run script: {end-start} secs')
python
from __future__ import annotations

import numpy as np
from PySide2.QtCore import QPoint, QRect
from PySide2.QtGui import QResizeEvent
from PySide2.QtWidgets import QMdiSubWindow


class DataViewerSubWindow(QMdiSubWindow):
    def __init__(self, viewer: DataViewer):
        super().__init__()

        self.viewer = viewer

        self.layout_anchors = None
        self._laying_out = False

        self.update_window_title()

    @property
    def viewer(self):
        return self.widget()

    @viewer.setter
    def viewer(self, value):
        self.setWidget(value)

    def update_window_title(self):
        self.setWindowTitle(self.viewer.data_path_name)

    def lay_out_to_anchors(self):
        if self.layout_anchors is None:
            return

        mdi = self.mdiArea()
        mdi_size = np.array([mdi.width(), mdi.height()])
        layout_rect_angle_point_coords = self.layout_anchors * mdi_size
        layout_rect = QRect(QPoint(*layout_rect_angle_point_coords[0]),
                            QPoint(*layout_rect_angle_point_coords[1]))

        self._laying_out = True
        self.setGeometry(layout_rect)
        self._laying_out = False

    def show_normal(self):
        if self.isHidden():
            self.show()
            self.viewer.show()
        if self.isMinimized():
            self.showNormal()

    def resizeEvent(self, resize_event: QResizeEvent):
        super().resizeEvent(resize_event)

        if not self._laying_out and self.layout_anchors is not None:
            mdi = self.mdiArea()
            top_left_point = self.mapTo(mdi, self.rect().topLeft())
            bottom_right_point = self.mapTo(mdi, self.rect().bottomRight())
            mdi_size = np.array([mdi.width(), mdi.height()])
            self.layout_anchors[0] = np.array([top_left_point.x(), top_left_point.y()]) / mdi_size
            self.layout_anchors[1] = np.array([bottom_right_point.x(), bottom_right_point.y()]) / mdi_size
python
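A sketch of how the anchor-based layout above might be driven from an MDI area. The viewer instance, the MDI area object, and the 0-1 anchor fractions are assumptions added for illustration and are not part of the original module.

# Hypothetical setup: anchor a sub-window to the left half of the MDI area.
import numpy as np

sub_window = DataViewerSubWindow(viewer)           # `viewer` assumed to be a DataViewer
sub_window.layout_anchors = np.array([[0.0, 0.0],  # top-left corner as a fraction of the MDI size
                                      [0.5, 1.0]]) # bottom-right corner
mdi_area.addSubWindow(sub_window)                  # `mdi_area` is an existing QMdiArea
sub_window.show_normal()
sub_window.lay_out_to_anchors()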
class AMQPError(Exception): message = 'An unspecified AMQP error has occurred: %s' def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.message % self.args) # Backward compatibility AMQPException = AMQPError class AMQPConnectionError(AMQPError): message = 'Connection can not be opened' class IncompatibleProtocolError(AMQPConnectionError): message = 'The protocol returned by the server is not supported' class AuthenticationError(AMQPConnectionError): message = ( 'Server and client could not negotiate use of the ' 'authentication mechanisms. Server supports only %r, ' 'but client supports only %r.' ) class ProbableAuthenticationError(AMQPConnectionError): message = ( 'Client was disconnected at a connection stage indicating a ' 'probable authentication error: %s' ) class ConnectionClosed(AMQPConnectionError): message = 'The AMQP connection was closed (%s) %s' class ConnectionSyntaxError(ConnectionClosed): message = ('The sender sent a frame that contained illegal values for ' 'one or more fields. This strongly implies a programming error ' 'in the sending peer: %r') class ConnectionFrameError(ConnectionClosed): message = ('The sender sent a malformed frame that the recipient could ' 'not decode. This strongly implies a programming error ' 'in the sending peer: %r') class ConnectionCommandInvalid(ConnectionClosed): message = ('The client sent an invalid sequence of frames, attempting to ' 'perform an operation that was considered invalid by the server.' ' This usually implies a programming error in the client: %r') class ConnectionChannelError(ConnectionClosed): message = ('The client attempted to work with a channel that had not been ' 'correctly opened. This most likely indicates a fault in the ' 'client layer: %r') class ConnectionUnexpectedFrame(ConnectionClosed): message = ("The peer sent a frame that was not expected, usually in the " "context of a content header and body. This strongly indicates " "a fault in the peer's content processing: %r") class ConnectionResourceError(ConnectionClosed): message = ("The server could not complete the method because it lacked " "sufficient resources. This may be due to the client creating " "too many of some type of entity: %r") class ConnectionNotAllowed(ConnectionClosed): message = ("The client tried to work with some entity in a manner that is " "prohibited by the server, due to security settings or by " "some other criteria: %r") class ConnectionNotImplemented(ConnectionClosed): message = ("The client tried to use functionality that is " "not implemented in the server: %r") class ConnectionInternalError(ConnectionClosed): message = (" The server could not complete the method because of an " "internal error. 
The server may require intervention by an " "operator in order to resume normal operations: %r") class AMQPChannelError(AMQPError): message = 'An unspecified AMQP channel error has occurred' class ChannelClosed(AMQPChannelError): message = 'The channel was closed (%s) %s' class ChannelAccessRefused(ChannelClosed): message = ('The client attempted to work with a server entity to ' 'which it has no access due to security settings: %r') class ChannelNotFoundEntity(ChannelClosed): message = ('The client attempted to work with a server ' 'entity that does not exist: %r') class ChannelLockedResource(ChannelClosed): message = ('The client attempted to work with a server entity to ' 'which it has no access because another client is working ' 'with it: %r') class ChannelPreconditionFailed(ChannelClosed): message = ('The client requested a method that was not allowed because ' 'some precondition failed: %r') class DuplicateConsumerTag(ChannelClosed): message = 'The consumer tag specified already exists for this channel: %s' class ProtocolSyntaxError(AMQPError): message = 'An unspecified protocol syntax error occurred' class InvalidFrameError(ProtocolSyntaxError): message = 'Invalid frame received: %r' class MethodNotImplemented(AMQPError): pass class DeliveryError(AMQPError): __slots__ = 'message', 'frame' def __init__(self, message, frame): self.message = message self.frame = frame super().__init__()
python
import sys import time from collections import deque from datetime import timedelta from rich import get_console from rich.progress import BarColumn, Progress, ProgressColumn, SpinnerColumn, TextColumn class TimeRemainingColumn(ProgressColumn): """Renders estimated time remaining.""" # Only refresh twice a second to prevent jitter max_refresh = 0.5 def __init__(self, *args, **kwargs): self.start_time = time.time() super().__init__(*args, **kwargs) def render(self, *args, **kwargs): delta = timedelta(seconds=int(time.time() - self.start_time)) return str(delta) class IterationsPerSecond: def format(self, task): if "times" in dir(task) and len(task.times): speed = len(task.times) / task.times[-1] return f"{speed:.2f}it/s" return "0.00it/s" class IndefeniteProgressBar: def __init__(self): with get_console() as console: self.pbar = Progress( SpinnerColumn(style=""), TextColumn("{task.completed}it"), BarColumn(console.width), TextColumn(IterationsPerSecond()), TimeRemainingColumn(), console=console, expand=True, ) self.pbar.start() self.pbar.add_task(None, start=False) self.pbar.tasks[0].times = deque(maxlen=100) self.start_time = time.time() def print(self, *args, sep=" ", end="\n"): msg = sep.join(map(str, args)) sys.stdout.writelines(msg + end) def update(self): task = self.pbar.tasks[0] task.completed += 1 task.times.append(time.time() - self.start_time) def close(self): self.pbar.stop()
python
import arcpy

arcpy.env.overwriteOutput = True

# Note: Script assumes data from Pro SDK community samples are installed under C:\Data, as follows:
inFC = r"E:\GISTech\2021\ProProjects\PythonUsage\PythonUsage.gdb\FCL_Lijn"
outFC = r"E:\GISTech\2021\ProProjects\PythonUsage\PythonUsage.gdb\ViaScript"

# Buffer the input features, creating a single 500 meter buffer feature class
arcpy.Buffer_analysis(inFC, outFC, "500 meter")

# The following message will be included in the message box from the calling button's OnClick routine
print("Python script executed.")
python
# -*- coding: utf-8 -*- from bitshares import BitShares from bitshares.instance import set_shared_bitshares_instance from bitshares.amount import Amount from bitshares.price import Price from bitshares.asset import Asset import unittest class Testcases(unittest.TestCase): def __init__(self, *args, **kwargs): super(Testcases, self).__init__(*args, **kwargs) bitshares = BitShares("wss://node.bitshares.eu", nobroadcast=True,) set_shared_bitshares_instance(bitshares) def test_init(self): # self.assertEqual(1, 1) Price("0.315 USD/BTS") Price(1.0, "USD/GOLD") Price(0.315, base="USD", quote="BTS") Price(0.315, base=Asset("USD"), quote=Asset("BTS")) Price( { "base": {"amount": 1, "asset_id": "1.3.0"}, "quote": {"amount": 10, "asset_id": "1.3.106"}, } ) Price( { "receives": {"amount": 1, "asset_id": "1.3.0"}, "pays": {"amount": 10, "asset_id": "1.3.106"}, }, base_asset=Asset("1.3.0"), ) Price(quote="10 GOLD", base="1 USD") Price("10 GOLD", "1 USD") Price(Amount("10 GOLD"), Amount("1 USD")) def test_multiplication(self): p1 = Price(10.0, "USD/GOLD") p2 = Price(5.0, "EUR/USD") p3 = p1 * p2 p4 = p3.as_base("GOLD") self.assertEqual(p4["quote"]["symbol"], "EUR") self.assertEqual(p4["base"]["symbol"], "GOLD") # 10 USD/GOLD * 0.2 EUR/USD = 50 EUR/GOLD = 0.02 GOLD/EUR self.assertEqual(float(p4), 0.02) # Inline multiplication p5 = p1 p5 *= p2 p4 = p5.as_base("GOLD") self.assertEqual(p4["quote"]["symbol"], "EUR") self.assertEqual(p4["base"]["symbol"], "GOLD") # 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.02 GOLD/EUR self.assertEqual(float(p4), 0.02) def test_div(self): p1 = Price(10.0, "USD/GOLD") p2 = Price(5.0, "USD/EUR") # 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD p3 = p1 / p2 p4 = p3.as_base("EUR") self.assertEqual(p4["base"]["symbol"], "EUR") self.assertEqual(p4["quote"]["symbol"], "GOLD") # 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.5 GOLD/EUR self.assertEqual(float(p4), 2) def test_div2(self): p1 = Price(10.0, "USD/GOLD") p2 = Price(5.0, "USD/GOLD") # 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD p3 = p1 / p2 self.assertTrue(isinstance(p3, (float, int))) self.assertEqual(float(p3), 2.0)
python
from ._version import VERSION
from ._chat_client import ChatClient
from ._chat_thread_client import ChatThreadClient
from ._generated.models import (
    SendChatMessageResult,
    ChatThreadInfo,
    ChatMessageType
)
from ._shared.user_credential import CommunicationTokenCredential
from ._shared.user_token_refresh_options import CommunicationTokenRefreshOptions
from ._models import (
    ChatThreadParticipant,
    ChatMessage,
    ChatThread,
    ChatMessageReadReceipt,
    ChatMessageContent
)
from ._shared.models import CommunicationUserIdentifier

__all__ = [
    'ChatClient',
    'ChatThreadClient',
    'ChatMessage',
    'ChatMessageContent',
    'ChatMessageReadReceipt',
    'SendChatMessageResult',
    'ChatThread',
    'ChatThreadInfo',
    'CommunicationTokenCredential',
    'CommunicationTokenRefreshOptions',
    'CommunicationUserIdentifier',
    'ChatThreadParticipant',
    'ChatMessageType'
]
__version__ = VERSION
python
from django.test import TestCase from wagtailmenus.conf import constants from wagtailmenus.models import MainMenu from wagtailmenus.tests import base, utils Page = utils.get_page_model() class MainMenuTestCase(TestCase): """A base TestCase class for testing MainMenu model class methods""" fixtures = ['test.json'] def get_random_menu_instance_with_opt_vals_set(self): obj = MainMenu.objects.order_by('?').first() obj._option_vals = utils.make_optionvals_instance() return obj def get_test_menu_instance(self): return MainMenu.objects.first() class TestMainMenuGeneralMethods(MainMenuTestCase): def test_create_from_collected_values_is_not_implemented(self): # Model-based menus use get_from_collected_values() instead of # create_from_collected_values(), because existing objects are reused, # rather than recreated each time menu = self.get_test_menu_instance() with self.assertRaises(NotImplementedError): menu.create_from_collected_values(None, None) class TestTopLevelItems(MainMenuTestCase): # ------------------------------------------------------------------------ # MainMenu.top_level_items # ------------------------------------------------------------------------ def test_uses_many_queries_when_menu_items_link_to_pages(self): # 6 queries in total: # 1. Fetch menu items # 2. Fetch vanilla pages # 3-7: Fetch specific pages (HomePage, TopLevelPage, LowLevelPage, ArticleListPage, ContactPage) menu = self.get_test_menu_instance() with self.assertNumQueries(7): menu.top_level_items def test_uses_a_single_query_when_no_menu_items_link_to_pages(self): # Replace any menu items that link to pages with links # to custom urls menu = self.get_test_menu_instance() for i, item in enumerate( menu.get_menu_items_manager().all() ): if item.link_page_id: item.link_page = None item.link_url = '/test/{}/'.format(i) item.save() # If no menu items link to pages, no further queries are needed with self.assertNumQueries(1): menu.top_level_items class TestGetPagesForDisplay(MainMenuTestCase): # ------------------------------------------------------------------------ # MainMenu.pages_for_display # ------------------------------------------------------------------------ def test_result(self): menu = MainMenu.objects.get(pk=1) # And a `max_levels` value of 2 self.assertEqual(menu.max_levels, 2) # Every page returned by `pages_for_display` should be a # live, not expired and meant to appear in menus for p in menu.pages_for_display.values(): self.assertTrue(p.live) self.assertFalse(p.expired) self.assertTrue(p.show_in_menus) # Their should be 12 pages total, 1 for each item, plus children: # 1. <HomePage: Home>, # 2. <TopLevelPage: About us> # 3. <LowLevelPage: Meet the team> # 4. <LowLevelPage: Our heritage> # 5. <LowLevelPage: Our mission and values> # X. <TopLevelPage: Superheroes> - not included (show_in_menus=False) # 6. <LowLevelPage: Marvel Comics> # 7. <LowLevelPage: D.C. Comics> # 8. <TopLevelPage: News & events> # 9. <LowLevelPage: Latest news> # 10. <LowLevelPage: Upcoming events> # 11. <LowLevelPage: In the press> # 12. 
<ContactPage: Contact us> self.assertEqual(len(menu.pages_for_display), 12) # After being called once, pages_for_display should be cached, so # accessing it again shouldn't trigger any database queries with self.assertNumQueries(0): list(menu.pages_for_display.values()) class TestAddMenuItemsForPages(MainMenuTestCase): # ------------------------------------------------------------------------ # MainMenu.add_menu_items_for_pages() # ------------------------------------------------------------------------ def test_add_menu_items_for_pages(self): menu = MainMenu.objects.get(pk=1) # The current number of menu items is 6 self.assertEqual(menu.get_menu_items_manager().count(), 6) # 'Superheroes' has 2 children: 'D.C. Comics' & 'Marvel Comics' superheroes_page = Page.objects.get(title="Superheroes") children_of_superheroes = superheroes_page.get_children() self.assertEqual(children_of_superheroes.count(), 2) # Use 'add_menu_items_for_pages' to add pages for the above pages menu.add_menu_items_for_pages(children_of_superheroes) # The number of menu items should now be 8 self.assertEqual(menu.get_menu_items_manager().count(), 8) # Evaluate menu items to a list menu_items = list(menu.get_menu_items_manager().all()) # The last item should be a link to the 'D.C. Comics' page, and the # sort_order on the item should be 7 dc_item = menu_items.pop() self.assertEqual(dc_item.link_page.title, 'D.C. Comics') self.assertEqual(dc_item.sort_order, 7) # The '2nd to last' item should be a link to the 'Marvel Comics' page, # and the sort_order on the item should be 6 marvel_item = menu_items.pop() self.assertEqual(marvel_item.link_page.title, 'Marvel Comics') self.assertEqual(marvel_item.sort_order, 6) class TestGetSpecifiedSubMenuTemplateName(MainMenuTestCase): # ------------------------------------------------------------------------ # MainMenu._get_specified_sub_menu_template_name() # (inherited from mixins.DefinesSubMenuTemplatesMixin) # ------------------------------------------------------------------------ def test_returns_none_if_no_templates_specified(self): menu = self.get_random_menu_instance_with_opt_vals_set() self.assertEqual( menu._get_specified_sub_menu_template_name(level=2), None ) self.assertEqual( menu._get_specified_sub_menu_template_name(level=3), None ) self.assertEqual( menu._get_specified_sub_menu_template_name(level=4), None ) def test_returns_last_template_when_no_template_specified_for_level(self): menu = MainMenu.objects.all().first() menu._option_vals = utils.make_optionvals_instance( sub_menu_template_names=('single_template.html',) ) self.assertEqual( menu._get_specified_sub_menu_template_name(level=2), 'single_template.html' ) self.assertEqual( menu._get_specified_sub_menu_template_name(level=3), 'single_template.html' ) def test_preference_order_of_specified_values(self): menu = MainMenu.objects.all().first() menu._option_vals = utils.make_optionvals_instance( sub_menu_template_name='single_template_as_option.html', sub_menu_template_names=('option_one.html', 'option_two.html') ) menu.sub_menu_template_name = 'single_template_as_attr.html' menu.sub_menu_template_names = utils.SUB_MENU_TEMPLATE_LIST # While both 'sub_menu_template_name' and 'sub_menu_template_names' are # specified as option values, the 'sub_menu_template_name' value will # be preferred self.assertEqual( menu._get_specified_sub_menu_template_name(level=4), 'single_template_as_option.html' ) # If only 'sub_menu_template_names' is specified as an option value, # that will be preferred menu._option_vals = 
utils.make_optionvals_instance( sub_menu_template_name=None, sub_menu_template_names=('option_one.html', 'option_two.html') ) self.assertEqual( menu._get_specified_sub_menu_template_name(level=4), 'option_two.html', ) # If no templates have been specified via options, the # 'sub_menu_template_name' attribute is preferred menu._option_vals = utils.make_optionvals_instance( sub_menu_template_name=None, sub_menu_template_names=None ) self.assertEqual( menu._get_specified_sub_menu_template_name(level=4), 'single_template_as_attr.html' ) # If the 'sub_menu_template_name' attribute is None, the method # should prefer the 'sub_menu_template_names' attribute menu.sub_menu_template_name = None self.assertEqual( menu._get_specified_sub_menu_template_name(level=4), menu.sub_menu_template_names[1] ) class TestGetSubMenuTemplateNames( MainMenuTestCase, base.GetSubMenuTemplateNamesMethodTestCase ): """ Tests MainMenu.get_sub_menu_template_names() using common test cases from base.GetTemplateNamesMethodTestCase """ expected_default_result_length = 4 class TestGetTemplateNames( MainMenuTestCase, base.GetTemplateNamesMethodTestCase ): """ Tests MainMenu.get_template_names() using common test cases from base.GetTemplateNamesMethodTestCase """ expected_default_result_length = 3 def mock_relative_url_method(self, site=None): return ''
python
""" Receba a altura do degrau de uma escada e a altura que o usuário deseja alcançar subindo a escada. Calcule e mostre quantos degraus o usuário deverá subir para atingir o seu objetivo. """ a = float(input('Qual é a altura do degrau da escada (cm)? ')) ab = float(input('Qual é a altura que você deseja alcançar subindo a escada (metros)? ')) x = (ab * 100) / a print(f'O usuário deverá subir {x:.0f} degraus para alcançar o objetivo.')
python
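A quick worked check of the same conversion, without the input() prompts; the step and target values are arbitrary examples.

# Worked example: a 20 cm step and a 3 m target give (3 * 100) / 20 = 15 steps.
step_cm = 20.0
target_m = 3.0
steps = (target_m * 100) / step_cm
print(f'{steps:.0f} steps')  # -> 15 steps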
import numpy as np
from netCDF4 import Dataset
from .utils import popEntries, setDimensions
from .OBSstruct import OBSstruct
import pandas as pd


def remove_duplicates(S, coordinate='fractional'):
    '''
    This function identifies duplicated observations and makes sure all
    observations on output are unique.

    Input:

    OBS        - OBSstruct object or observation netcdf file
    coordinate - Whether to base the method on fractional grid coordinates
                 (default) or use lon/lat/depth ('geographical')
    '''
    if not isinstance(S, OBSstruct):
        fid = Dataset(S)
        OBS = OBSstruct(fid)
    else:
        OBS = OBSstruct(S)

    OBSout = OBSstruct()
    OBSout.variance = OBS.variance
    OBSout.Nstate = OBS.Nstate
    OBSout.spherical = OBS.spherical
    OBSout.globalatts = OBS.globalatts

    # Create a pandas dataframe from the observation object:
    data = {}
    for name in OBS.getfieldlist():
        data[name] = getattr(OBS, name)

    if coordinate == 'fractional':
        identifyers = {'X': 'Xgrid', 'Y': 'Ygrid', 'Z': 'Zgrid'}
    elif coordinate == 'geographical':
        identifyers = {'X': 'lon', 'Y': 'lat', 'Z': 'depth'}
    identifyers['T'] = 'time'
    identifyers['V'] = 'value'

    # expand data with rounded values that will be used to test uniqueness
    for name in identifyers.keys():
        data[name] = np.round(getattr(OBS, identifyers[name]), 3)

    # Finally, the dataframe:
    df = pd.DataFrame(data)
    df = df.drop_duplicates(subset=["T", "X", "Y", "Z", "V", "type"])

    # Convert the reduced data set back to observation object
    for name in OBS.getfieldlist():
        setattr(OBSout, name, df[name].values)

    OBSout = setDimensions(OBSout)
    return OBSout
python
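A hedged usage sketch for remove_duplicates above; it accepts either an OBSstruct or the path to an observation netCDF file, and the file name used here is a placeholder.

# Hypothetical usage (the netCDF file name is an assumption):
deduped = remove_duplicates('roms_obs.nc', coordinate='geographical')
print(deduped.Nstate, deduped.variance)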
import matplotlib.pyplot as plt import csv import random import numpy as np import math import matplotlib.patches as patches data = {} with open('datasets/data_boston.csv', 'r') as csvfile: csvfile.readline() file = csv.reader(csvfile, delimiter=',') for row in file: if data.has_key(row[5]): data[row[5]].append([float(row[14]), float(row[15]), row[5]]) else: data[row[5]] = [[float(row[14]), float(row[15]), row[5]]] data_list = [] lat_min = 99 lat_max = -99 long_min = 99 long_max = -99 print "data done" violation_map = {} i=0 for key,value in data.iteritems(): random.shuffle(value) if len(value) > 20000: violation_map[key] = i i = i+1 for val in value[:20000]: if val[0] > lat_max: lat_max = val[0] if val[0] < lat_min: lat_min = val[0] if val[1] > long_max: long_max = val[1] if val[1] < long_min: long_min = val[1] data_list.append(val) print "data list done" del data count = {} print lat_max, lat_min, long_max, long_min lat_range_min = 999 lat_range_max = -999 long_range_min = 999 long_range_max = -999 for l in data_list: lat_key = int(math.floor((l[0]-lat_min)*1000)) long_key = int(math.floor(math.fabs(l[1]-long_min)*1000)) if lat_key > lat_range_max: lat_range_max = lat_key if lat_key < lat_range_min: lat_range_min = lat_key if long_key > long_range_max: long_range_max = long_key if long_key < long_range_min: long_range_min = long_key if not count.has_key((lat_key, long_key)): count[(lat_key, long_key)] = [0 for j in range(len(violation_map))] count[(lat_key, long_key)][violation_map[l[2]]] = count[(lat_key, long_key)][violation_map[l[2]]] + 1 print lat_range_min, lat_range_max, long_range_min, long_range_max """ for key,value in count.iteritems(): print key, value """ """ lat_range_min = int(math.floor((lat_min-math.floor(lat_min))*1000)) lat_range_max = int(math.floor((lat_max-math.floor(lat_max))*1000)) long_range_min = int(math.floor((long_min-math.floor(long_min))*1000)) long_range_max = int(math.floor((long_max-math.floor(long_max))*1000)) """ fig = plt.figure() ax = fig.add_subplot(111, aspect='equal') ax.set_xlim([lat_range_min, lat_range_max]) ax.set_ylim([long_range_min, long_range_max]) print lat_range_min, lat_range_max, long_range_min, long_range_max for i in range(lat_range_min, lat_range_max): for j in range(long_range_min, long_range_max): #print i,j if count.has_key((i,j)): tot = count[(i,j)][0]+count[(i,j)][1]+count[(i,j)][2] red = int(count[(i,j)][0]*255/tot) blue = int(count[(i,j)][1]*255/tot) green = int(count[(i,j)][2]*255/tot) color = '#'+('0'+str(hex(red).split('x')[1]))[-2:] + ('0'+str(hex(blue).split('x')[1]))[-2:] +('0'+str(hex(green).split('x')[1]))[-2:] ax.add_patch( patches.Rectangle( (i, j), 1, 1, facecolor=color, linewidth=0, ) ) fig.savefig('rect.png', dpi=1000, bbox_inches='tight') plt.show() """ fig1 = plt.figure() ax1 = fig1.add_subplot(111, aspect='equal') ax1.add_patch( patches.Rectangle( (0.1, 0.1), # (x,y) 0.5, # width 0.5, # height facecolor = color, ) ) fig1.savefig('rect1.png', dpi=90, bbox_inches='tight') plt.show() """ """ division = 1000 lat_interval = (lat_max-lat_min)/division long_interval = (long_max-long_min)/division count_in_grid = [[[0,0,0,0] for i in range(division)] for j in range(division)] print "array init done" for i in range(division): print i, " of ", division for j in range(division): for l in data_list: if l[0] < lat_min + (i+1)*lat_interval and l[0] > lat_min + i*lat_interval and l[1] < long_min + (i+1)*long_interval and l[1] > long_min + i*long_interval: count_in_grid[i][j][violation_map[l[3]]] = 
count_in_grid[i][j][violation_map[l[3]]] + 1 print count_in_grid """
python
#!/usr/bin/python
# coding:utf-8
import os
import re
import string
import linecache
import shutil

# Get file name from given directory
directoryPath = os.getcwd()
# directoryPath2 = os.getcwd() + '\\New'
file_extension = ".md"

if __name__ == '__main__':
    for fileName in os.listdir(directoryPath):
        if fileName.endswith(file_extension):
            file1 = directoryPath + '\\' + fileName
            file2 = directoryPath + '\\New\\' + fileName
            with open(file1, "r") as f1, open(file2, "w") as f2:
                for line in f1:
                    if '<br /><p style="text-align:center"><a href="https://www.seeedstudio.com/act-4.html" target="_blank"><img src="https://github.com/SeeedDocument/Wiki_Banner/raw/master/new_product.jpg" /></a></p>' in line:
                        line = line.replace('<br /><p style="text-align:center"><a href="https://www.seeedstudio.com/act-4.html" target="_blank"><img src="https://github.com/SeeedDocument/Wiki_Banner/raw/master/new_product.jpg" /></a></p>', '<br /><p style="text-align:center"><a href="https://www.seeedstudio.com/act-4.html?utm_source=wiki&utm_medium=wikibanner&utm_campaign=newproducts" target="_blank"><img src="https://github.com/SeeedDocument/Wiki_Banner/raw/master/new_product.jpg" /></a></p>')
                    f2.write(line)
            os.remove(file1)
            os.rename(file2, file1)
python
from .point_cloud import PointCloud, PointCloudMeta, PointCloudSpatial # noqa
python
def empty_graph(n):
    res = []
    for i in range(n):
        res.append([0] * n)
    return res


def convert(graph):
    matrix = []
    for i in range(len(graph)):
        matrix.append([0] * len(graph))
        for j in graph[i]:
            matrix[i][j] = 1
    return matrix


def prims_algo(graph):
    graph1 = convert(graph)
    n = len(graph1)
    tree = empty_graph(n)
    con = [0]
    while len(con) < n:
        found = False
        for i in con:
            for j in range(n):
                if j not in con and graph1[i][j] == 1:
                    tree[i][j] = 1
                    tree[j][i] = 1
                    con += [j]
                    found = True
                    break
            if found:
                break
    return tree


matrix = [[0, 1, 1, 1, 0, 1, 1, 0, 0],
          [1, 0, 0, 1, 0, 0, 1, 1, 0],
          [1, 0, 0, 1, 0, 0, 0, 0, 0],
          [1, 1, 1, 0, 1, 0, 0, 0, 0],
          [0, 0, 0, 1, 0, 1, 0, 0, 1],
          [1, 0, 0, 0, 1, 0, 0, 0, 1],
          [1, 1, 0, 0, 0, 0, 0, 0, 0],
          [0, 1, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 1, 1, 0, 0, 0]]

lst = [[1, 2, 3, 5, 6], [0, 3, 6, 7], [0, 3], [0, 1, 2, 4], [3, 5, 8], [0, 4, 8], [0, 1], [1], [4, 5]]

print("From graph to spanning tree:\n")
print(prims_algo(lst))
python
#
# customization fragment to run L1 GT emulator starting from a RAW file
#
# V.M. Ghete 2010-06-09

import FWCore.ParameterSet.Config as cms


def customise(process):

    #
    # (re-)run the L1 GT emulator starting from a RAW file
    #
    from L1Trigger.Configuration.L1Trigger_custom import customiseL1GtEmulatorFromRaw
    process = customiseL1GtEmulatorFromRaw(process)

    #
    # special configuration cases (change to desired configuration in customize_l1TriggerConfiguration)
    #
    from L1Trigger.Configuration.customise_l1TriggerConfiguration import customiseL1TriggerConfiguration
    process = customiseL1TriggerConfiguration(process)

    #
    # customization of output commands
    #
    from L1Trigger.Configuration.L1Trigger_custom import customiseOutputCommands
    process = customiseOutputCommands(process)

    #
    # print the L1 trigger report
    # comment/un-comment the corresponding flag
    #
    # printL1TriggerReport = False
    printL1TriggerReport = True

    if printL1TriggerReport:
        from L1Trigger.Configuration.L1Trigger_custom import customiseL1TriggerReport
        process = customiseL1TriggerReport(process)

        process.SimL1Emulator_L1TriggerReport = cms.Sequence(process.SimL1Emulator * process.l1GtTrigReport)
        process.L1simulation_step.replace(process.SimL1Emulator, process.SimL1Emulator_L1TriggerReport)

        process.l1GtTrigReport.L1GtRecordInputTag = "simGtDigis"

    #
    return (process)
python
""" Overview: Useful functions for build representation format of object. """ from typing import List, Tuple __all__ = [ 'get_repr_info', ] def get_repr_info(cls: type, args: List[Tuple]) -> str: """ Overview: Get representation information for object. Can be used in ``__repr__`` method for class. Arguments: - cls (:obj:`type`): Object's type. - args (:obj:`List[Tuple]`): Argument display information. Returns: - repr (:obj:`str`): Representation string. Examples:: >>> from hbutils.model import get_repr_info >>> class Sum: ... def __init__(self, a, b): ... self.__a = a ... self.__b = b ... def __repr__(self): ... return get_repr_info( ... cls=self.__class__, ... args=[ ... ('b', lambda: self.__b, lambda: self.__b is not None), ... ('a', lambda: self.__a), ... ] ... ) ... >>> Sum(1, 2) <Sum b: 2, a: 1> >>> Sum(1, None) <Sum a: 1> >>> Sum(None, None) <Sum a: None> """ _data_items = [] for item in args: if isinstance(item, tuple): if len(item) == 2: name, fd = item if isinstance(fd, tuple): _data_func, _present_func = fd else: _data_func, _present_func = fd, lambda: True elif len(item) == 3: name, _data_func, _present_func = item else: raise ValueError('Tuple\'s length should be 2 or 3 but {actual} found.'.format(actual=repr(len(item)))) if _present_func(): _data_items.append('{name}: {data}'.format(name=name, data=_data_func())) else: raise TypeError( 'Argument item should be tuple but {actual} found.'.format(actual=repr(type(item).__name__))) if _data_items: return '<{cls} {data}>'.format(cls=cls.__name__, data=', '.join(_data_items)) else: return '<{cls}>'.format(cls=cls.__name__)
python
# -*- Mode: Python; tab-width: 4 -*-

# Copyright (c) 2005-2010 Slide, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above
#     copyright notice, this list of conditions and the following
#     disclaimer in the documentation and/or other materials provided
#     with the distribution.
#   * Neither the name of the author nor the names of other
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

'''error

Definitions for access/service return code errors/exceptions.
'''

import exceptions

SUCCESS = 0

#
# old style, pass through rc values
#
UNKNOWN = 1
DUPLICATE_KEY = 2
EXEC_TRACEBACK = 5
AFFINITY_ERROR = 6

#
# new style exceptions.
#
table = {}
lookup = lambda i, *a: table.get(i, AccessError)(*a)

ACCESS_ERROR_MASK = 0x400  # starting at 1K to avoid collision.


class AccessError(exceptions.Exception):
    id = 0x400 + 0


class DatabaseUnavailable(AccessError):
    '''DatabaseUnavailable

    Database was unavailable to service the request
    '''
    id = 0x400 + 1


class NoServiceHandler(AccessError):
    '''NoServiceHandler

    The requested service handler does not exist.
    '''
    id = 0x400 + 2


class ServiceTraceback(AccessError):
    '''ServiceTraceback

    Unknown/Unhandled exception occurred while executing the request.
    '''
    id = 0x400 + 3


class LockTimeout(AccessError):
    '''LockTimeout

    resource lock timed out/heavy lock contention
    '''
    id = 0x400 + 4


class ParameterError(AccessError):
    '''ParameterError

    The request had incorrect/inconsistent parameters.
    '''
    id = 0x400 + 5


class NoServiceDefined(AccessError):
    '''NoServiceDefined

    The request was made with no service defined.
    '''
    id = 0x400 + 6


#
# Build ID/exception table
#
for v in locals().values():
    try:
        if issubclass(v, AccessError):
            table[v.id] = v
    except TypeError:
        pass

table[None] = AccessError
#
# end..
python
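A brief usage sketch of the id/exception table above (hypothetical values; the module targets Python 2, since it imports the old exceptions module, but the calls below are version-neutral):

# Sketch: resolve an error id back to its exception instance via the lookup lambda.
err = lookup(0x400 + 1)        # -> DatabaseUnavailable instance
print(type(err).__name__)      # prints: DatabaseUnavailable
err2 = lookup(12345)           # unknown ids fall back to AccessError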
from bnop_source.b_code.bnop_facades import BnopFacades
from bnop_source.b_code.core.object_model.bnop_repositories import BnopRepositories
from bnop_source.b_code.core.object_model.objects.bnop_objects import BnopObjects
from boro_common_source.ckids.boro_object_ckids import BoroObjectCkIds
from nf_common_source.code.constants.standard_constants import DEFAULT_NULL_VALUE
from nf_common_source.code.nf.types.nf_column_types import NfColumnTypes
from nf_common_source.code.services.dataframe_service.dataframe_mergers import inner_merge_dataframes
from nf_ea_common_tools_source.b_code.nf_ea_common.common_knowledge.ea_connector_types import EaConnectorTypes
from nf_ea_common_tools_source.b_code.services.general.nf_ea.com.common_knowledge.collection_types.nf_ea_com_collection_types import NfEaComCollectionTypes
from nf_ea_common_tools_source.b_code.services.general.nf_ea.com.common_knowledge.column_types.nf_ea_com_column_types import NfEaComColumnTypes
from nf_ea_common_tools_source.b_code.services.general.nf_ea.com.nf_ea_com_universes import NfEaComUniverses

INSTANCE_UML_NAMES_COLUMN = \
    'instance_uml_names'

TYPE_UML_NAMES_COLUMN = \
    'type_uml_names'


def migrate_ea_connectors_in_scope_of_typing_pattern(
        nf_ea_com_universe: NfEaComUniverses,
        bnop_repository: BnopRepositories):
    typing_ea_connectors = \
        __get_typing_connectors(
            nf_ea_com_universe=nf_ea_com_universe)

    __migrate_typing_connectors(
        ea_connectors=typing_ea_connectors,
        bnop_repository=bnop_repository)


def __get_typing_connectors(
        nf_ea_com_universe: NfEaComUniverses) \
        -> list:
    ea_connectors = \
        nf_ea_com_universe.nf_ea_com_registry.dictionary_of_collections[NfEaComCollectionTypes.EA_CONNECTORS]

    ea_classifiers = \
        nf_ea_com_universe.nf_ea_com_registry.dictionary_of_collections[NfEaComCollectionTypes.EA_CLASSIFIERS]

    typing_ea_connectors = \
        ea_connectors[ea_connectors[
            NfEaComColumnTypes.CONNECTORS_ELEMENT_TYPE_NAME.column_name] == EaConnectorTypes.DEPENDENCY.type_name]

    typing_ea_connectors_with_uml_names_dataframe = \
        inner_merge_dataframes(
            master_dataframe=typing_ea_connectors,
            master_dataframe_key_columns=[
                NfEaComColumnTypes.ELEMENTS_CLIENT_PLACE2_END_CONNECTORS.column_name],
            merge_suffixes=['', '_type_uml_names'],
            foreign_key_dataframe=ea_classifiers,
            foreign_key_dataframe_fk_columns=[NfColumnTypes.NF_UUIDS.column_name],
            foreign_key_dataframe_other_column_rename_dictionary=
            {
                NfEaComColumnTypes.EXPLICIT_OBJECTS_EA_OBJECT_NAME.column_name: TYPE_UML_NAMES_COLUMN
            })

    typing_ea_connectors_with_uml_names_dataframe = \
        inner_merge_dataframes(
            master_dataframe=typing_ea_connectors_with_uml_names_dataframe,
            master_dataframe_key_columns=[
                NfEaComColumnTypes.ELEMENTS_SUPPLIER_PLACE1_END_CONNECTORS.column_name],
            merge_suffixes=['', '_instance_uml_names'],
            foreign_key_dataframe=ea_classifiers,
            foreign_key_dataframe_fk_columns=[NfColumnTypes.NF_UUIDS.column_name],
            foreign_key_dataframe_other_column_rename_dictionary=
            {
                NfEaComColumnTypes.EXPLICIT_OBJECTS_EA_OBJECT_NAME.column_name: INSTANCE_UML_NAMES_COLUMN
            })

    typing_ea_connectors_with_uml_names_dataframe.fillna(
        value=DEFAULT_NULL_VALUE,
        inplace=True)

    typing_ea_connectors_with_uml_names = \
        typing_ea_connectors_with_uml_names_dataframe.to_dict(
            orient='records')

    return \
        typing_ea_connectors_with_uml_names


def __migrate_typing_connectors(
        ea_connectors: list,
        bnop_repository: BnopRepositories):
    for ea_connector in ea_connectors:
        __migrate_typing_connector(
            ea_connector=ea_connector,
            bnop_repository=bnop_repository)


def __migrate_typing_connector(
        bnop_repository: BnopRepositories,
        ea_connector: dict):
    typing_tuple_nf_uuid = \
        ea_connector[NfColumnTypes.NF_UUIDS.column_name]

    instance_nf_uuid = \
        ea_connector[NfEaComColumnTypes.ELEMENTS_SUPPLIER_PLACE1_END_CONNECTORS.column_name]

    instance_uml_name = \
        ea_connector[INSTANCE_UML_NAMES_COLUMN]

    type_nf_uuid = \
        ea_connector[NfEaComColumnTypes.ELEMENTS_CLIENT_PLACE2_END_CONNECTORS.column_name]

    type_uml_name = \
        ea_connector[TYPE_UML_NAMES_COLUMN]

    if instance_nf_uuid in BnopObjects.registry_keyed_on_uuid:
        bnop_instance = \
            BnopObjects.registry_keyed_on_uuid[instance_nf_uuid]
    else:
        bnop_instance = \
            BnopFacades.create_bnop_object(
                object_uuid=instance_nf_uuid,
                owning_repository_uuid=bnop_repository.uuid,
                presentation_name=instance_uml_name)

    if type_nf_uuid in BnopObjects.registry_keyed_on_uuid:
        bnop_type = \
            BnopObjects.registry_keyed_on_uuid[type_nf_uuid]
    else:
        bnop_type = \
            BnopFacades.create_bnop_type(
                type_uuid=type_nf_uuid,
                owning_repository_uuid=bnop_repository.uuid,
                presentation_name=type_uml_name)

    BnopFacades.create_bnop_tuple_from_two_placed_objects(
        tuple_uuid=typing_tuple_nf_uuid,
        placed1_object=bnop_type,
        placed2_object=bnop_instance,
        immutable_minor_composition_couple_type_boro_object_ckid=BoroObjectCkIds.TypesInstances,
        owning_repository_uuid=bnop_repository.uuid)
python
""" Unit tests for flat_file.py See: https://code.visualstudio.com/docs/python/testing """ import unittest from cred_manage.flat_file import FlatFileCredContainer import os FLAT_FILE_THAT_EXISTS='/tmp/file_that_exist.txt' FLAT_FILE_THAT_DOES_NOT_EXIST='/tmp/file_that_not_exists.txt' def setUpModule(): """ Boilerplate to ensure the conditions are right for these tests """ # See that there is a flat file that actually exists with open(FLAT_FILE_THAT_EXISTS, 'w') as f: f.write("There is content in this file.\n") # Ensure that there is no such file on disk with the name in FLAT_FILE_THAT_DOES_NOT_EXIST if os.path.exists(FLAT_FILE_THAT_DOES_NOT_EXIST): os.remove(FLAT_FILE_THAT_DOES_NOT_EXIST) def tearDownModule(): """ Post-testing cleanup """ # Clean up the flat file we generated as part of setUpModule if os.path.exists(FLAT_FILE_THAT_EXISTS): os.remove(FLAT_FILE_THAT_EXISTS) # It exists no longer class Test_FlatFileCredContainer(unittest.TestCase): def test_init_with_bad_file_name(self): """ Assert that a FileNotFoundError is raised when we try to init FlatFileCredContainer with a bad file name """ self.assertRaises(FileNotFoundError, FlatFileCredContainer, file_path=FLAT_FILE_THAT_DOES_NOT_EXIST) def test_init_with_valid_file_name(self): """ Assert that no Exceptions are raised by __ini__ for FlatFileCredContainer when instantiating with a valid file name """ # Armed with a file that exists, init the object. We expect no exceptions to be raised try: o = FlatFileCredContainer(file_path=FLAT_FILE_THAT_EXISTS, allow_broad_permissions=True) except Exception as ex: self.fail(f"An unexpected exception occurred when instantiating the FlatFileCredContainer during the test: {str(type(ex))}") def test_get_cred_method_implemented(self): """ Asserts that the get_cred method has been implemented. The superclass will raise a NotImplementedError otherwise """ o = FlatFileCredContainer(file_path=FLAT_FILE_THAT_EXISTS, allow_broad_permissions=True) try: c = o.get_cred(self) except NotImplementedError as ex: self.fail(f"The get_cred() method has not been implemented in the subclass: {type(o)}") #TODO: Add a test to see that set cred is implemented #TODO: Add a test to see that delete cred is implemented
python
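One optional addition to the test module above (a sketch, not in the original): a standard entry point so the file can also be executed directly rather than only via python -m unittest.

# Sketch: allow running this test module directly in addition to `python -m unittest`.
if __name__ == '__main__':
    unittest.main()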
from marshmallow import fields, validate

from app import ma
from nfmanagementapi.models import FilterRule


class FilterRuleSchema(ma.SQLAlchemyAutoSchema):
    class Meta:
        model = FilterRule
        ordered = True

    uuid = fields.UUID(required=True, description="Unique Identifier", dump_only=True)
    name = fields.String(required=True, description="Rule name")
    description = fields.String(required=False, description="Description")
    source = fields.List(fields.UUID(), required=False, description="list of Source object UUIDs")
    destination = fields.List(fields.UUID(), required=False, description="list of Destination object UUIDs")
    service = fields.List(fields.UUID(), required=False, description="list of Service UUIDs")
    action = fields.String(required=True, description="Action to apply", validate=validate.OneOf(["accept", "drop"]))
    ctime = fields.DateTime(required=True, description="Creation time", dump_only=True)
    mtime = fields.DateTime(required=True, description="Modification time", dump_only=True)
python
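A hedged sketch of how a schema like the one above is typically used: the rule and rules objects here are hypothetical FilterRule instances assumed to be loaded via SQLAlchemy elsewhere in the application.

# Sketch: serialize FilterRule model instances with the auto-schema.
schema = FilterRuleSchema()
payload = schema.dump(rule)                              # single object -> dict
payload_list = FilterRuleSchema(many=True).dump(rules)   # list of objects -> list of dicts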
import Adafruit_DHT
import Adafruit_BMP.BMP085 as BMP085
import requests


def getReadings():
    # DHT22 sensor on GPIO pin 4: returns (humidity, temperature)
    humidity, dht_temp = Adafruit_DHT.read_retry(22, 4)
    if humidity is not None and dht_temp is not None:
        # BMP085/BMP180 barometric sensor over I2C
        bmp_sensor = BMP085.BMP085()
        pressure = bmp_sensor.read_pressure()
        bmp_temp = bmp_sensor.read_temperature()
        if pressure is not None and bmp_temp is not None:
            data = {}
            data['temperatureBmp'] = bmp_temp
            data['temperatureDht'] = dht_temp
            data['humidity'] = humidity
            data['pressure'] = pressure
            return data
    return None


data = getReadings()
print(data)
requests.post('http://pharylonapi.azurewebsites.net/api/weather/reading', data)
python
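A small defensive variant of the tail of the script above (a sketch, not part of the original): skip the POST when a sensor read fails and getReadings() returns None.

# Sketch: only post when readings were actually obtained.
data = getReadings()
if data is not None:
    print(data)
    requests.post('http://pharylonapi.azurewebsites.net/api/weather/reading', data)
else:
    print("Sensor read failed; nothing posted.")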
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        if not l1:
            return l2
        if not l2:
            return l1
        if l1.val < l2.val:
            return ListNode(l1.val, self.mergeTwoLists(l1.next, l2))
        else:
            return ListNode(l2.val, self.mergeTwoLists(l1, l2.next))
python
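A minimal usage sketch for the merge routine above, assuming the commented-out ListNode definition is actually available (as it is in the LeetCode environment):

# Sketch: build two sorted lists, merge them, and print the merged values.
def build(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

merged = Solution().mergeTwoLists(build([1, 2, 4]), build([1, 3, 4]))
out = []
while merged:
    out.append(merged.val)
    merged = merged.next
print(out)  # expected: [1, 1, 2, 3, 4, 4]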
import bug_killer_client.network.project as project_client
from bug_killer_api_interface.schemas.request.project import CreateProjectPayload, UpdateProjectPayload
from bug_killer_api_interface.schemas.response import UserProjectsResponse, ProjectResponse


async def get_user_projects(auth: str) -> UserProjectsResponse:
    """
    Get the projects that the user is a manager or member of

    auth: The cognito user's id token
    """
    raw_rsp = await project_client.get_user_projects(auth)
    return UserProjectsResponse.parse_obj(raw_rsp)


async def get_project(auth: str, project_id: str) -> ProjectResponse:
    """
    Get project by its id

    auth: The cognito user's id token
    project_id: The id of the project to get
    """
    raw_rsp = await project_client.get_project(auth, project_id)
    return ProjectResponse.parse_obj(raw_rsp)


async def create_project(auth: str, payload: CreateProjectPayload) -> ProjectResponse:
    """
    Creates a project

    auth: The cognito user's id token
    payload: The details of the project to create
    """
    raw_rsp = await project_client.create_project(auth, payload.api_dict())
    return ProjectResponse.parse_obj(raw_rsp)


async def update_project(auth: str, project_id: str, payload: UpdateProjectPayload) -> ProjectResponse:
    """
    Updates a project by its id

    auth: The cognito user's id token
    project_id: The id of the project to update
    payload: The details of the project to update
    """
    raw_rsp = await project_client.update_project(auth, project_id, payload.api_dict())
    return ProjectResponse.parse_obj(raw_rsp)


async def delete_project(auth: str, project_id: str) -> ProjectResponse:
    """
    Deletes a project by its id

    auth: The cognito user's id token
    project_id: The id of the project to delete
    """
    raw_rsp = await project_client.delete_project(auth, project_id)
    return ProjectResponse.parse_obj(raw_rsp)
python
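A hedged usage sketch for the async wrappers above. The token value and project id are placeholders, and the calls assume the bug_killer packages are installed and the backing API is reachable:

# Sketch only: drive the async client helpers with asyncio.
# "ID_TOKEN" and "some-project-id" are hypothetical placeholders, not real values.
import asyncio

async def main():
    projects = await get_user_projects(auth="ID_TOKEN")
    print(projects)
    project = await get_project(auth="ID_TOKEN", project_id="some-project-id")
    print(project)

if __name__ == "__main__":
    asyncio.run(main())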