# Import the modules import sys import MinVel as mv import numpy as np # NOTES: May want to update temperature dependence of thermal expansivity using Holland and Powell's (2011) # new revised equations (see figure 1 in that article). This will necessitate recalculating the first # Gruneisen parameters. This could provide more realistic temperature dependence of material # properties within the mantle. if len(sys.argv) > 1: if sys.argv[1] == "-h": print('MinVel -- Program to calculate mineral aggregate moduli and density') print('') print(' Written by Oliver Boyd') print('') print(' This program calculates the velocity and density of a mineral assemblage ') print(' at a given pressure and temperature (which may be vectors).') print(' The velocities are expressed as Voigt, Reuss, and Voigt-Reuss-Hill averages.') print('') print(' The data required for this analysis is taken from Hacker and Abers (2003),') print(' updated by Abers and Hacker in 2016, and expanded by Boyd in 2018.') print(' The moduli at pressure and temperature are calculated based on the') print(' procedures of Hacker and Abers (2004), Bina and Helffrich (1992) and') print(' Holland and Powell (1998) as outlined in the supplementary section of ') print(' Boyd et al. (2004) with updates by Abers and Hacker (2016) for quartz.') print('') print(' OUTPUT (SI Units)') print(' results.npy - numpy binary file containing the following vectors:') print(' Voigt-Reuss-Hill averages') print(' K - Bulk modulus') print(' G - Shear modulus') print(' E - Youngs modulus') print(' l - Lambda') print(' v - Poissons ratio') print(' Vp - P-wave velocity') print(' Vs - S-wave velocity') print(' p - Density') print(' a - Thermal Expansivity') print(' Voigt(v) and Reuss(r) bounds on velocity') print(' Vpv - P-wave velocity') print(' Vpr - P-wave velocity') print(' Vsv - S-wave velocity') print(' Vsr - S-wave velocity') print('') print(' INPUTS') print(' Command line options') print(' -h Help about this program.') print('') print(' -f InputFile - File containing composition, temperature, and pressure ') print(' information with the following format') print(' MinIndx 1, MinIndx 2, ..., MinIndx N') print(' VolFrac 1, VolFrac 2, ..., VolFrac N') print(' T1, P1') print(' T2, P2') print(' ...') print(' TN, PN') print('') print(' -p Pressure - desired pressure or comma separated vector of pressures (Pa)') print(' -t Temperature - desired temperature or comma separated vector of temperatures (K)') print('') print(' Composition parmeters - a composition structure with the following fields: ') print(' -cm Min - The mineral index comma separated vector.') print(' -cv Fr - Volume fraction for each mineral in Min (0 to 1), comma separated.') print('') print(' Mineral Indexes') print(' Quartz') print(' 1. Alpha Quartz ') print(' 2. Beta Quartz ') print(' 3. Coesite ') print(' Feldspar group') print(' Plagioclase') print(' 4. High Albite ') print(' 5. Low Albite ') print(' 6. Anorthite ') print('') print(' 7. Orthoclase ') print(' 8. Sanidine ') print(' Garnet structural group') print(' 9. Almandine ') print(' 10. Grossular ') print(' 11. Pyrope ') print(' Olivine group') print(' 12. Forsterite ') print(' 13. Fayalite ') print(' Pyroxene group') print(' 14. Diopside ') print(' 15. Enstatite ') print(' 16. Ferrosilite ') print(' 79. Mg-Tschermak ') print(' 17. Jadeite ') print(' 18. Hedenbergite ') print(' 80. Acmite ') print(' 81. Ca-Tschermak ') print(' Amphibole supergroup') print(' 19. Glaucophane ') print(' 20. Ferroglaucophane ') print(' 21. 
Tremolite ') print(' 22. Ferroactinolite ') print(' 23. Tshermakite ') print(' 24. Pargasite ') print(' 25. Hornblende ') print(' 26. Anthophyllite ') print(' Mica group') print(' 27. Phlogopite ') print(' 28. Annite ') print(' 29. Muscovite ') print(' 30. Celadonite ') print(' Other') print(' 31. Talc ') print(' 32. Clinochlore ') print(' 33. Daphnite ') print(' 34. Antigorite ') print(' 35. Zoisite ') print(' 36. Clinozoisite ') print(' 37. Epidote ') print(' 38. Lawsonite ') print(' 39. Prehnite ') print(' 40. Pumpellyite ') print(' 41. Laumontite ') print(' 42. Wairakite ') print(' 43. Brucite ') print(' 44. Clinohumite ') print(' 45. Phase A ') print(' 46. Sillimanite ') print(' 47. Kyanite ') print(' 48. Spinel ') print(' 49. Hercynite ') print(' 50. Magnetite ') print(' 51. Calcite ') print(' 52. Aragonite ') print(' 82. Magnesite ') print(' 83. En79Fs09Ts12 ') print(' 84. Di75He9Jd3Ts12 ') print(' 85. ilmenite ') print(' 86. cordierite ') print(' 87. scapolite (meionite) ') print(' 88. rutile ') print(' 89. sphene ') print(' 53. Corundum ') print(' 54. Dolomite ') print(' 74. Halite ') print(' 77. Pyrite ') print(' 78. Gypsum ') print(' 90. Anhydrite ') print(' 0. Water ') print(' -1. Ice ') print(' Clays') print(' 55. Montmorillonite (Saz-1)') print(' 56. Montmorillonite (S Wy-2)') print(' 57. Montmorillonite (STX-1)') print(' 58. Montmorillonite (S Wy-1)') print(' 59. Montmorillonite (Shca-1)') print(' 60. Kaolinite (Kga-2)') print(' 61. Kaolinite (Kga-1b)') print(' 62. Illite (IMT-2)') print(' 63. Illite (ISMT-2)') print(' 66. Smectite (S Wa-1)') print(' 70. Montmorillonite (S YN-1)') print(' 71. Chrysotile ') print(' 72. Lizardite ') print(' 76. Dickite ') print('') print(' Example:'); print(' Geophysical parameters for 20% Quartz, 20% low Albite, 30% Forsterite, and 30% Fayalite at') print(' 300, 400, and 500K and 0.1, 0.3, and 0.5 MPa') print(' > python MinVelWrapper.py -t 300,400,500 -p 0.1e6,0.3e6,0.5e6 -cm 1,5,12,13 -cv 0.2,0.2,0.3,0.3') print('') sys.exit() nMin = 1 nPT = 1 nT = 0 nP = 0 if len(sys.argv) > 1: for j in range(1,len(sys.argv),2): if sys.argv[j] == "-t": entries = sys.argv[j+1].split(",") nT = len(entries) T = np.zeros((nT),dtype=np.float64) for k in range(0,nT): T[k] = entries[k] if sys.argv[j] == "-p": entries = sys.argv[j+1].split(",") nP = len(entries) P = np.zeros((nP),dtype=np.float64) for k in range(0,nP): P[k] = entries[k] if sys.argv[j] == "-cm": entries = sys.argv[j+1].split(",") nMin = len(entries) Cm = np.zeros((nMin),dtype=np.int8) for k in range(0,nMin): Cm[k] = entries[k] if sys.argv[j] == "-cv": entries = sys.argv[j+1].split(",") nFr = len(entries) Cv = np.zeros((nFr),dtype=np.float64) for k in range(0,nFr): Cv[k] = entries[k] if sys.argv[j] == "-f": fl = sys.argv[j+1] print('Reading {0:s}'.format(fl)) f = open(fl,"r") if f.mode == "r": nPT = 0 ln = 0 for line in f: line = line.strip() columns = line.split(",") if ln < 2: nMin = len(columns) else: nPT = nPT + 1 ln = ln + 1 nT = nPT nP = nPT nFr = nMin f.close() T = np.zeros((nPT),dtype=np.float64) P = np.zeros((nPT),dtype=np.float64) Cm = np.zeros((nMin),dtype=np.int8) Cv = np.zeros((nMin),dtype=np.float64) f = open(fl,"r") if f.mode == "r": ln = 0 jT = 0 for line in f: line = line.strip() columns = line.split(",") if ln == 0: for j in range(0,len(columns)): Cm[j] = columns[j] elif ln == 1: for j in range(0,len(columns)): Cv[j] = columns[j] else: T[jT] = columns[0] P[jT] = columns[1] jT = jT + 1 ln = ln + 1 f.close() # MAke sure volume fractions sum to 1 if sum(Cv) < 1: print('Composition does 
not sum to one. - Exiting') sys.exit() if nT != nP: print('Number of temperature inputs must be equal to the number of pressure inputs') sys.exit() else: nPT = nT if nMin != nFr: print('Number of minerals types must be equal to the number of mineral fractional volumes') sys.exit() Par, MinNames, nPar, nAllMin = mv.loadPar('../database/MineralPhysicsDatabase.nc') MinIndex = Par[0,:]; print('{0:21s}{1:20s}'.format('Mineral','Volume fraction')) for j in range(0,nMin): k = mv.find(MinIndex,Cm[j]); print(MinNames[:,k].tobytes().decode('utf-8'),'(',Cv[j],')') if nPT > 1: print('There are',nPT,'temperature and pressure points') else: print('Temperature',T) print('Pressure',P) print('') K, G, E, l, v, Vp, Vs, den, Vpv, Vpr, Vsv, Vsr, a = mv.CalcMV(Cm,Cv,T,P); print('K ',K) print('G ',G) print('E ',E) print('l ',l) print('v ',v) print('Vp ',Vp) print('Vs ',Vs) print('den',den) print('a ',a) print('') print('Voigt(v) and Reuss(r) bounds on velocity') print('Vpv',Vpv) print('Vpr',Vpr) print('Vsv',Vsv) print('Vsr',Vsr) print('') res = np.zeros((13,nPT),dtype=np.float64) res[0,:] = K res[1,:] = G res[2,:] = E res[3,:] = l res[4,:] = v res[5,:] = Vp res[6,:] = Vs res[7,:] = den res[8,:] = a res[9,:] = Vpv res[10,:] = Vpr res[11,:] = Vsv res[12,:] = Vsr f = 'results.npy' np.save(f,res) sys.exit()
python
import os import sys import cv2 import numpy as np from PyQt5.QtCore import pyqtSlot, QThreadPool, QTimer from PyQt5.QtWidgets import * from PyQt5 import QtCore from PyQt5.QtGui import * from src.transformers.Transformer import Transformer, getTransformer from src.util.UserInterface.ControlBox import ControlBox from src.util.UserInterface.Display import Display from src.util.UserInterface.DisplayWorker import DisplayWorker from src.util.UserInterface.RadioBox import RadioBox from src.util.UserInterface.ReferenceCarousel import ReferenceCarousel from src.util.UserInterface.Result import Result from src.util.UserInterface.StartScreen import StartScreen from src.util.UserInterface.TransformWorker import TransformWorker from src.util.UserInterface.TypeSelector import TypeSelector from src.util.capture import Capture BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ref_images = next(os.walk(BASE_DIR + '/../../ref_images'), (None, None, []))[2] NOT_FOUND: QPixmap T: Transformer def set_align_center(x: QWidget) -> QWidget: x.setAlignment(QtCore.Qt.AlignCenter) return x def get_qimage(path: str) -> QPixmap: qimage = QPixmap() qimage.load(path, flags=QtCore.Qt.AutoColor) return qimage class MainWindow(QMainWindow): def __init__(self): super().__init__() self.window_stack = QStackedWidget(self) self.start_screen = StartScreen() self.display_worker = DisplayWorker(capture) self.display = Display() self.radio_box = RadioBox() self.reference_carousel = ReferenceCarousel(ref_images) self.control_box = ControlBox() self.type_selector = TypeSelector(ref_images) self.result = Result() self.transform_worker = TransformWorker(capture, T) self.setWindowTitle("HAiR") self.setGeometry(0, 0, 1920, 1080) self.setup() @pyqtSlot() def start_signal(self): self.window_stack.setCurrentIndex(1) self.type_selector.initialize() self.control_box.initialize() self.display_worker.go = True self.display_worker.start() @pyqtSlot() def close_signal(self): self.close() @pyqtSlot() def result_signal(self): # deprecated dont use self.window_stack.setCurrentIndex(2) self.display_worker.go = False @pyqtSlot(int) def ref_select(self, index: int): self.type_selector.set_reference(self.radio_box.type, index) if self.radio_box.type == "머리 색상": T.set_appearance_ref(ref_images[index][0]) else: T.set_shape_ref(ref_images[index][0]) T.set_structure_ref(ref_images[index][0]) @pyqtSlot(str) def ref_unselect(self, ref_type: str) -> None: if ref_type == "머리 색상": T.set_appearance_ref(None) else: T.set_shape_ref(None) T.set_structure_ref(None) @pyqtSlot(QPixmap) def get_image(self, image: QPixmap): self.display.set_image(image) @pyqtSlot() def back_to_start_signal(self): self.window_stack.setCurrentIndex(0) @pyqtSlot() def qr_done_signal(self): self.window_stack.setCurrentIndex(0) @pyqtSlot(int) def result_clicked_signal(self, timestamp: int): self.qr_result.set(timestamp) self.window_stack.setCurrentIndex(3) @pyqtSlot() def transform_signal(self): self.control_box.transform_button.setDisabled(True) self.control_box.set_processing() pool = QThreadPool.globalInstance() pool.start(self.transform_worker) self.transform_worker = TransformWorker(capture, transformer=T) self.transform_worker.signal.transformed.connect(self.transformed_signal) @pyqtSlot(np.ndarray) def transformed_signal(self, image: np.ndarray): if image.ndim == 1: # when failed self.control_box.set_error() QTimer().singleShot(2000, self.control_box.set_ready) else: self.control_box.set_ready() self.control_box.result_button.setDisabled(False) self.result.set(image) 
self.control_box.transform_button.setDisabled(False) def setup(self): # Start Screen self.start_screen.start.connect(self.start_signal) self.start_screen.close.connect(self.close_signal) # DISPLAY self.display_worker.finished.connect(self.get_image) # REF CAROUSEL [i.selected_reference.connect(self.ref_select) for i in self.reference_carousel.carousel] # TYPE SELECTOR [i.unselect.connect(self.ref_unselect) for i in self.type_selector.selectors.values()] # CONTROL BOX self.control_box.result.connect(self.result_signal) self.control_box.transform.connect(self.transform_signal) self.control_box.close.connect(self.close_signal) # QR result self.result.qr_done.connect(self.qr_done_signal) # Transform thread self.transform_worker.signal.transformed.connect(self.transformed_signal) # setup UI start = QWidget(self) start.setLayout(self.start_screen) self.setCentralWidget(self.window_stack) transform = QWidget(self) transform_window = set_align_center(QHBoxLayout()) left_box = set_align_center(QVBoxLayout()) right_box = set_align_center(QVBoxLayout()) left_box.addLayout(self.display, 1) left_box.addWidget(self.radio_box) left_box.addLayout(self.reference_carousel, 1) right_box.addLayout(self.type_selector, 3) right_box.addLayout(self.control_box, 1) transform_window.addStretch(1) transform_window.addLayout(left_box, 8) transform_window.addLayout(right_box, 4) transform.setLayout(transform_window) self.window_stack.addWidget(start) # 0 self.window_stack.addWidget(transform) # 1 self.window_stack.addWidget(self.result) # 2 if __name__ == "__main__": T = getTransformer() capture = Capture(0) app = QApplication(sys.argv) ref_images = list( map(lambda x: [ cv2.imread(BASE_DIR + '/../../ref_images/' + x), get_qimage(BASE_DIR + '/../../ref_images/' + x) ], ref_images) ) ref_images.append( [ cv2.imread(BASE_DIR + '/image_not_selected.png'), get_qimage(BASE_DIR + '/image_not_selected.png') ] ) mainWindow = MainWindow() mainWindow.showFullScreen() ret = app.exec_() sys.exit(ret)
python
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. # This script provides a basic example of how to use the Health Checks service. # Please review the documentation for more information about # how Health Checks works, including permissions needed. # # https://docs.cloud.oracle.com/iaas/Content/HealthChecks/Concepts/healthchecks.htm import oci from datetime import datetime # Helper to format dates def format_time(timestamp): # Will be ticks, not seconds from epoch return datetime.utcfromtimestamp(timestamp / 1000).strftime('%Y-%m-%d %H:%M:%S') # Default config file and profile config = oci.config.from_file() healthchecks_client = oci.healthchecks.HealthChecksClient(config) # This is the root compartment. You can use another compartment in your tenancy. compartment_id = config["tenancy"] # List of available vantage points vantage_points = healthchecks_client.list_health_checks_vantage_points().data # HttpMonitors examples # Creating a new HttpMonitor: http_monitor = healthchecks_client.create_http_monitor( oci.healthchecks.models.CreateHttpMonitorDetails( compartment_id=compartment_id, display_name="Monitor Name", targets=["example.com"], protocol="HTTPS", vantage_point_names=[vantage_points[0].name], # If not specified we will auto assign 3 vantage points port=443, path="/", is_enabled=False, interval_in_seconds=30, timeout_in_seconds=30 ) ).data # Updating an existing monitor: # Note: You only need to specify any properties you wish to change. # It returns the updated monitor. http_monitor = healthchecks_client.update_http_monitor( monitor_id=http_monitor.id, update_http_monitor_details=oci.healthchecks.models.UpdateHttpMonitorDetails( targets=["example.com", "other.example.com"], is_enabled=True ) ).data print('Display Name: {}, isEnabled: {}'.format(http_monitor.display_name, http_monitor.is_enabled)) # Retrieving monitor results: # There's a pagination helper to get all the pages for you. http_monitor_results = oci.pagination.list_call_get_all_results(healthchecks_client.list_http_probe_results, http_monitor.id) for monitor_result in http_monitor_results.data: print('Result: {}, Start Time: {}, isHealthy: {}'.format(monitor_result.target, format_time(monitor_result.start_time), monitor_result.is_healthy)) # To change the compartment: healthchecks_client.change_http_monitor_compartment( monitor_id=http_monitor.id, change_http_monitor_compartment_details=oci.healthchecks.models.ChangeHttpMonitorCompartmentDetails( compartment_id="NEW_COMPARTMENT_ID" ) ) # The delete will have no return if successful healthchecks_client.delete_http_monitor(monitor_id=http_monitor.id) # PingMonitors examples # Creating a new PingMonitor: ping_monitor = healthchecks_client.create_ping_monitor( oci.healthchecks.models.CreatePingMonitorDetails( compartment_id=compartment_id, display_name="Monitor Name", targets=["example.com"], protocol="ICMP", vantage_point_names=[vantage_points[0].name], # If not specified we will auto assign 3 vantage points is_enabled=False, interval_in_seconds=30, timeout_in_seconds=30 ) ).data # Updating an existing monitor: # Note: You only need to specify any properties you wish to change. # It returns the updated monitor. 
ping_monitor = healthchecks_client.update_ping_monitor( monitor_id=ping_monitor.id, update_ping_monitor_details=oci.healthchecks.models.UpdatePingMonitorDetails( targets=["example.com", "other.example.com"], is_enabled=True ) ).data print('Display Name: {}, isEnabled: {}'.format(ping_monitor.display_name, ping_monitor.is_enabled)) # Retrieving monitor results: # There's a pagination helper to get all the pages for you. ping_monitor_results = oci.pagination.list_call_get_all_results(healthchecks_client.list_ping_probe_results, ping_monitor.id) for monitor_result in ping_monitor_results.data: print('Result: {}, Start Time: {}, isHealthy: {}'.format(monitor_result.target, format_time(monitor_result.start_time), monitor_result.is_healthy)) # To change the compartment: healthchecks_client.change_ping_monitor_compartment( monitor_id=ping_monitor.id, change_ping_monitor_compartment_details=oci.healthchecks.models.ChangePingMonitorCompartmentDetails( compartment_id="NEW_COMPARTMENT_ID" ) ) # The delete will have no return if successful healthchecks_client.delete_ping_monitor(monitor_id=ping_monitor.id)
python
"""Metrics to assess performance on sequence labeling task given prediction Functions named as ``*_score`` return a scalar value to maximize: the higher the better Reference: seqeval==0.0.19 """ from __future__ import absolute_import, division, print_function import warnings from collections import defaultdict import numpy as np def get_entities(seq, suffix=False): """Gets entities from sequence. Args: seq (list): sequence of labels. Returns: list: list of (chunk_type, chunk_start, chunk_end). Example: >>> from seqeval.metrics.sequence_labeling import get_entities >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC'] >>> get_entities(seq) [('PER', 0, 1), ('LOC', 3, 3)] """ def _validate_chunk(chunk, suffix): if chunk in ["O", "B", "I", "E", "S"]: return if suffix: if not ( chunk.endswith("-B") or chunk.endswith("-I") or chunk.endswith("-E") or chunk.endswith("-S") ): warnings.warn("{} seems not to be NE tag.".format(chunk)) else: if not ( chunk.startswith("B-") or chunk.startswith("I-") or chunk.startswith("E-") or chunk.startswith("S-") ): warnings.warn("{} seems not to be NE tag.".format(chunk)) # for nested list if any(isinstance(s, list) for s in seq): seq = [item for sublist in seq for item in sublist + ["O"]] prev_tag = "O" prev_type = "" begin_offset = 0 chunks = [] for i, chunk in enumerate(seq + ["O"]): _validate_chunk(chunk, suffix) if suffix: tag = chunk[-1] type_ = chunk[:-1].rsplit("-", maxsplit=1)[0] or "_" else: tag = chunk[0] type_ = chunk[1:].split("-", maxsplit=1)[-1] or "_" if end_of_chunk(prev_tag, tag, prev_type, type_): chunks.append((prev_type, begin_offset, i - 1)) if start_of_chunk(prev_tag, tag, prev_type, type_): begin_offset = i prev_tag = tag prev_type = type_ return chunks def end_of_chunk(prev_tag, tag, prev_type, type_): """Checks if a chunk ended between the previous and current word. Args: prev_tag: previous chunk tag. tag: current chunk tag. prev_type: previous type. type_: current type. Returns: chunk_end: boolean. """ chunk_end = False if prev_tag == "E": chunk_end = True if prev_tag == "S": chunk_end = True if prev_tag == "B" and tag == "B": chunk_end = True if prev_tag == "B" and tag == "S": chunk_end = True if prev_tag == "B" and tag == "O": chunk_end = True if prev_tag == "I" and tag == "B": chunk_end = True if prev_tag == "I" and tag == "S": chunk_end = True if prev_tag == "I" and tag == "O": chunk_end = True if prev_tag != "O" and prev_tag != "." and prev_type != type_: chunk_end = True return chunk_end def start_of_chunk(prev_tag, tag, prev_type, type_): """Checks if a chunk started between the previous and current word. Args: prev_tag: previous chunk tag. tag: current chunk tag. prev_type: previous type. type_: current type. Returns: chunk_start: boolean. """ chunk_start = False if tag == "B": chunk_start = True if tag == "S": chunk_start = True if prev_tag == "E" and tag == "E": chunk_start = True if prev_tag == "E" and tag == "I": chunk_start = True if prev_tag == "S" and tag == "E": chunk_start = True if prev_tag == "S" and tag == "I": chunk_start = True if prev_tag == "O" and tag == "E": chunk_start = True if prev_tag == "O" and tag == "I": chunk_start = True if tag != "O" and tag != "." and prev_type != type_: chunk_start = True return chunk_start def f1_score(y_true, y_pred, average="micro", suffix=False): """Compute the F1 score. The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. 
The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. Returns: score : float. Example: >>> from seqeval.metrics import f1_score >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> f1_score(y_true, y_pred) 0.50 """ true_entities = set(get_entities(y_true, suffix)) pred_entities = set(get_entities(y_pred, suffix)) nb_correct = len(true_entities & pred_entities) nb_pred = len(pred_entities) nb_true = len(true_entities) p = nb_correct / nb_pred if nb_pred > 0 else 0 r = nb_correct / nb_true if nb_true > 0 else 0 score = 2 * p * r / (p + r) if p + r > 0 else 0 return score def accuracy_score(y_true, y_pred): """Accuracy classification score. In multilabel classification, this function computes subset accuracy: the set of labels predicted for a sample must *exactly* match the corresponding set of labels in y_true. Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. Returns: score : float. Example: >>> from seqeval.metrics import accuracy_score >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> accuracy_score(y_true, y_pred) 0.80 """ if any(isinstance(s, list) for s in y_true): y_true = [item for sublist in y_true for item in sublist] y_pred = [item for sublist in y_pred for item in sublist] nb_correct = sum(y_t == y_p for y_t, y_p in zip(y_true, y_pred)) nb_true = len(y_true) score = nb_correct / nb_true return score def precision_score(y_true, y_pred, average="micro", suffix=False): """Compute the precision. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample. The best value is 1 and the worst value is 0. Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. Returns: score : float. Example: >>> from seqeval.metrics import precision_score >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> precision_score(y_true, y_pred) 0.50 """ true_entities = set(get_entities(y_true, suffix)) pred_entities = set(get_entities(y_pred, suffix)) nb_correct = len(true_entities & pred_entities) nb_pred = len(pred_entities) score = nb_correct / nb_pred if nb_pred > 0 else 0 return score def recall_score(y_true, y_pred, average="micro", suffix=False): """Compute the recall. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The best value is 1 and the worst value is 0. Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. Returns: score : float. 
Example: >>> from seqeval.metrics import recall_score >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> recall_score(y_true, y_pred) 0.50 """ true_entities = set(get_entities(y_true, suffix)) pred_entities = set(get_entities(y_pred, suffix)) nb_correct = len(true_entities & pred_entities) nb_true = len(true_entities) score = nb_correct / nb_true if nb_true > 0 else 0 return score def performance_measure(y_true, y_pred): """ Compute the performance metrics: TP, FP, FN, TN Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. Returns: performance_dict : dict Example: >>> from seqeval.metrics import performance_measure >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'O', 'B-ORG'], ['B-PER', 'I-PER', 'O', 'B-PER']] >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O', 'O'], ['B-PER', 'I-PER', 'O', 'B-MISC']] >>> performance_measure(y_true, y_pred) {'TP': 3, 'FP': 3, 'FN': 1, 'TN': 4} """ performance_dict = dict() if any(isinstance(s, list) for s in y_true): y_true = [item for sublist in y_true for item in sublist] y_pred = [item for sublist in y_pred for item in sublist] performance_dict["TP"] = sum( y_t == y_p for y_t, y_p in zip(y_true, y_pred) if ((y_t != "O") or (y_p != "O")) ) performance_dict["FP"] = sum( ((y_t != y_p) and (y_p != "O")) for y_t, y_p in zip(y_true, y_pred) ) performance_dict["FN"] = sum( ((y_t != "O") and (y_p == "O")) for y_t, y_p in zip(y_true, y_pred) ) performance_dict["TN"] = sum( (y_t == y_p == "O") for y_t, y_p in zip(y_true, y_pred) ) return performance_dict def classification_report(y_true, y_pred, digits=2, suffix=False, output_dict=False): """Build a text report showing the main classification metrics. Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a classifier. digits : int. Number of digits for formatting output floating point values. output_dict : bool(default=False). If True, return output as dict else str. Returns: report : string/dict. Summary of the precision, recall, F1 score for each class. 
Examples: >>> from seqeval.metrics import classification_report >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> print(classification_report(y_true, y_pred)) precision recall f1-score support <BLANKLINE> MISC 0.00 0.00 0.00 1 PER 1.00 1.00 1.00 1 <BLANKLINE> micro avg 0.50 0.50 0.50 2 macro avg 0.50 0.50 0.50 2 weighted avg 0.50 0.50 0.50 2 <BLANKLINE> """ true_entities = set(get_entities(y_true, suffix)) pred_entities = set(get_entities(y_pred, suffix)) name_width = 0 d1 = defaultdict(set) d2 = defaultdict(set) for e in true_entities: d1[e[0]].add((e[1], e[2])) name_width = max(name_width, len(e[0])) for e in pred_entities: d2[e[0]].add((e[1], e[2])) avg_types = ["micro avg", "macro avg", "weighted avg"] if output_dict: report_dict = dict() else: avg_width = max([len(x) for x in avg_types]) width = max(name_width, avg_width, digits) headers = ["precision", "recall", "f1-score", "support"] head_fmt = "{:>{width}s} " + " {:>9}" * len(headers) report = head_fmt.format("", *headers, width=width) report += "\n\n" row_fmt = "{:>{width}s} " + " {:>9.{digits}f}" * 3 + " {:>9}\n" ps, rs, f1s, s = [], [], [], [] for type_name in sorted(d1.keys()): true_entities = d1[type_name] pred_entities = d2[type_name] nb_correct = len(true_entities & pred_entities) nb_pred = len(pred_entities) nb_true = len(true_entities) p = nb_correct / nb_pred if nb_pred > 0 else 0 r = nb_correct / nb_true if nb_true > 0 else 0 f1 = 2 * p * r / (p + r) if p + r > 0 else 0 if output_dict: report_dict[type_name] = { "precision": p, "recall": r, "f1-score": f1, "support": nb_true, } else: report += row_fmt.format( *[type_name, p, r, f1, nb_true], width=width, digits=digits ) ps.append(p) rs.append(r) f1s.append(f1) s.append(nb_true) if not output_dict: report += "\n" # compute averages nb_true = np.sum(s) for avg_type in avg_types: if avg_type == "micro avg": # micro average p = precision_score(y_true, y_pred, suffix=suffix) r = recall_score(y_true, y_pred, suffix=suffix) f1 = f1_score(y_true, y_pred, suffix=suffix) elif avg_type == "macro avg": # macro average p = np.average(ps) r = np.average(rs) f1 = np.average(f1s) elif avg_type == "weighted avg": # weighted average p = np.average(ps, weights=s) r = np.average(rs, weights=s) f1 = np.average(f1s, weights=s) else: assert False, "unexpected average: {}".format(avg_type) if output_dict: report_dict[avg_type] = { "precision": p, "recall": r, "f1-score": f1, "support": nb_true, } else: report += row_fmt.format( *[avg_type, p, r, f1, nb_true], width=width, digits=digits ) if output_dict: return report_dict else: return report
python
from det3d.core.utils.scatter import scatter_mean from torch.nn import functional as F from ..registry import READERS from torch import nn import numpy as np import torch def voxelization(points, pc_range, voxel_size): keep = (points[:, 0] >= pc_range[0]) & (points[:, 0] <= pc_range[3]) & \ (points[:, 1] >= pc_range[1]) & (points[:, 1] <= pc_range[4]) & \ (points[:, 2] >= pc_range[2]) & (points[:, 2] <= pc_range[5]) points = points[keep, :] coords = ((points[:, [2, 1, 0]] - pc_range[[2, 1, 0]]) / voxel_size[[2, 1, 0]]).to(torch.int64) unique_coords, inverse_indices = coords.unique(return_inverse=True, dim=0) voxels = scatter_mean(points, inverse_indices, dim=0) return voxels, unique_coords def voxelization_virtual(points, pc_range, voxel_size): # current one is hard coded for nuScenes # TODO: fix those magic number keep = (points[:, 0] >= pc_range[0]) & (points[:, 0] <= pc_range[3]) & \ (points[:, 1] >= pc_range[1]) & (points[:, 1] <= pc_range[4]) & \ (points[:, 2] >= pc_range[2]) & (points[:, 2] <= pc_range[5]) points = points[keep, :] real_points_mask = points[:, -2] == 1 painted_points_mask = points[:, -2] == 0 virtual_points_mask = points[:, -2] == -1 # remove zero padding for real points real_points = points[real_points_mask][:, [0, 1, 2, 3, -1]] painted_point = points[painted_points_mask] virtual_point = points[virtual_points_mask] padded_points = torch.zeros(len(points), 22, device=points.device, dtype=points.dtype) # real points will occupy channels 0 to 4 and -1 padded_points[:len(real_points), :5] = real_points padded_points[:len(real_points), -1] = 1 # painted points will occupy channels 5 to 21 padded_points[len(real_points):len(real_points)+len(painted_point), 5:19] = painted_point[:, :-2] padded_points[len(real_points):len(real_points)+len(painted_point), 19] = painted_point[:, -1] padded_points[len(real_points):len(real_points)+len(painted_point), 20] = 1 padded_points[len(real_points):len(real_points)+len(painted_point), 21] = 0 # virtual points will occupy channels 5 to 21 padded_points[len(real_points)+len(painted_point):, 5:19] = virtual_point[:, :-2] padded_points[len(real_points)+len(painted_point):, 19] = virtual_point[:, -1] padded_points[len(real_points)+len(painted_point):, 20] = 0 padded_points[len(real_points)+len(painted_point):, 21] = 0 points_xyz = torch.cat([real_points[:, :3], painted_point[:, :3], virtual_point[:, :3]], dim=0) coords = ((points_xyz[:, [2, 1, 0]] - pc_range[[2, 1, 0]]) / voxel_size[[2, 1, 0]]).to(torch.int64) unique_coords, inverse_indices = coords.unique(return_inverse=True, dim=0) voxels = scatter_mean(padded_points, inverse_indices, dim=0) indicator = voxels[:, -1] mix_mask = (indicator > 0) * (indicator < 1) # remove index voxels = voxels[:, :-1] voxels[mix_mask, :5] = voxels[mix_mask, :5] / indicator[mix_mask].unsqueeze(-1) voxels[mix_mask, 5:] = voxels[mix_mask, 5:] / (1-indicator[mix_mask].unsqueeze(-1)) return voxels, unique_coords @READERS.register_module class DynamicVoxelEncoder(nn.Module): def __init__( self, pc_range, voxel_size, virtual=False ): super(DynamicVoxelEncoder, self).__init__() self.pc_range = torch.tensor(pc_range) self.voxel_size = torch.tensor(voxel_size) self.shape = torch.round((self.pc_range[3:] - self.pc_range[:3]) / self.voxel_size) self.shape_np = self.shape.numpy().astype(np.int32) self.virtual = virtual @torch.no_grad() def forward(self, points): # points list[torch.Tensor] coors = [] voxels = [] for res in points: if self.virtual: voxel, coor = voxelization_virtual(res, self.pc_range.to(res.device), 
self.voxel_size.to(res.device)) else: voxel, coor = voxelization(res, self.pc_range.to(res.device), self.voxel_size.to(res.device)) voxels.append(voxel) coors.append(coor) coors_batch = [] for i in range(len(voxels)): coor_pad = F.pad(coors[i], (1, 0), mode='constant', value=i) coors_batch.append(coor_pad) coors_batch = torch.cat(coors_batch, dim=0) voxels_batch = torch.cat(voxels, dim=0) return voxels_batch, coors_batch, self.shape_np
python
from fjord.base.tests import eq_, TestCase
from fjord.feedback.utils import clean_url, compute_grams


class Testclean_url(TestCase):
    def test_basic(self):
        data = [
            (None, None),
            ('', ''),
            ('http://example.com/', 'http://example.com/'),
            ('http://example.com/#foo', 'http://example.com/'),
            ('http://example.com/?foo=bar', 'http://example.com/'),
            ('http://example.com:8000/', 'http://example.com/'),
            ('ftp://foo.bar/', ''),
            ('chrome://something', 'chrome://something'),
            ('about:home', 'about:home'),
        ]

        for url, expected in data:
            eq_(clean_url(url), expected)


class TestComputeGrams(TestCase):
    # FIXME - Beef this up so that we have more comprehensive tests of
    # the various tokenizing edge cases.

    def test_basic(self):
        test_data = [
            ('The quick brown fox',
             [u'brown quick', u'brown fox']),

            ('the latest update disables the New tab function',
             [u'disables new', u'function tab', u'new tab', u'latest update',
              u'disables update']),

            ('why is firefox so damn slow???? many tabs load slow or not at '
             'all!',
             [u'load tabs', u'load slow', u'slow tabs', u'damn slow']),

            ("I'm one of the guys that likes to try Firefox ahead of the "
             'herd... usually I use Nightly, but then a while back my '
             'favorite add-on, TabMixPlus stopped working because Firefox '
             'redid something in the code. "No problem," says I to myself, '
             "I'll just use Aurora until they get it fixed.",
             [u'add-on favorite', u'add-on tabmixplus', u'ahead herd',
              u'ahead try', u'aurora fixed', u'aurora use', u'code problem',
              u'code redid', u'favorite nightly', u"guys i'm", u'guys likes',
              u'herd usually', u"i'll just", u"i'll myself", u'just use',
              u'likes try', u'myself says', u'nightly use', u'problem says',
              u'redid working', u'stopped tabmixplus', u'stopped working',
              u'use usually']),

            ('Being partially sighted, I found the features with Windows XP '
             'and IE8 extremely usefu;. I need everything in Arial black bold '
             'text.',
             [u'extremely usefu', u'features sighted', u'windows xp',
              u'ie8 xp', u'black bold', u'partially sighted', u'need usefu',
              u'features windows', u'arial need', u'arial black',
              u'bold text', u'extremely ie8']),
        ]

        for text, expected in test_data:
            eq_(sorted(compute_grams(text)), sorted(expected))
python
from typing import Optional

from cdm.enums import CdmObjectType
from cdm.objectmodel import CdmAttributeReference, CdmCorpusContext

from .cdm_object_ref_persistence import CdmObjectRefPersistence


class AttributeReferencePersistence(CdmObjectRefPersistence):
    @staticmethod
    def from_data(ctx: CdmCorpusContext, data: str) -> Optional[CdmAttributeReference]:
        if not data:
            return None

        simple_reference = True
        attribute = data

        return ctx.corpus.make_ref(CdmObjectType.ATTRIBUTE_REF, attribute, simple_reference)
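# A minimal usage sketch (illustrative only; the attribute name 'accountId' and the
# surrounding corpus/context objects are assumptions, not part of this module):
# from_data turns a plain attribute-name string into a simple reference via the
# corpus factory, or returns None for an empty string.
#
#   ref = AttributeReferencePersistence.from_data(ctx, 'accountId')
#   # ref is a CdmAttributeReference created with simple_reference=True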
python
import pandas as pd

IN_FILE = 'aus-domain-urls.txt'
START_IDX = 0
BLOCK_SIZE = [10, 20, 50, 100, 1000, 100000, 1000000]
OUT_FILE_PREFIX = 'aus-domain-urls'

data = pd.read_csv(IN_FILE)
data_length = len(data)

for i in range(len(BLOCK_SIZE)):
    if i == 0:
        lower_bound = 0
    else:
        lower_bound = upper_bound

    if i == len(BLOCK_SIZE) - 1:
        upper_bound = data_length
    else:
        upper_bound = lower_bound + BLOCK_SIZE[i]

    out_file = '{}_{}_{}_{}.txt'.format(OUT_FILE_PREFIX, lower_bound, upper_bound,
                                        upper_bound - lower_bound)
    (data.iloc[lower_bound:upper_bound, :]).to_csv(out_file, header=False, index=None, sep=" ")
python
# Generated by Django 3.2.6 on 2021-10-19 10:58

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dashboard', '0002_auto_20211019_1613'),
    ]

    operations = [
        migrations.RemoveField(model_name='bookingrooms', name='room'),
        migrations.AddField(model_name='bookingrooms', name='branch',
                            field=models.CharField(default='null', max_length=30)),
        migrations.AddField(model_name='bookingrooms', name='category',
                            field=models.CharField(default='null', max_length=30)),
        migrations.AddField(model_name='bookingrooms', name='city',
                            field=models.CharField(default='null', max_length=30)),
        migrations.AddField(model_name='bookingrooms', name='contact',
                            field=models.CharField(default='null', max_length=30)),
        migrations.AddField(model_name='bookingrooms', name='duration',
                            field=models.CharField(default='null', max_length=30)),
        migrations.AddField(model_name='bookingrooms', name='email',
                            field=models.CharField(default='null', max_length=30)),
        migrations.AddField(model_name='bookingrooms', name='gender',
                            field=models.CharField(default='null', max_length=30)),
        migrations.AddField(model_name='bookingrooms', name='hostel_name',
                            field=models.CharField(default='null', max_length=30)),
        migrations.AddField(model_name='bookingrooms', name='year',
                            field=models.CharField(default='null', max_length=30)),
        migrations.AlterField(model_name='bookingrooms', name='college',
                              field=models.CharField(default='null', max_length=30)),
        migrations.AlterField(model_name='bookingrooms', name='cust_name',
                              field=models.CharField(default='null', max_length=30)),
        migrations.AlterField(model_name='bookingrooms', name='date',
                              field=models.CharField(default='null', max_length=30)),
        migrations.AlterField(model_name='bookingrooms', name='payment',
                              field=models.CharField(default='null', max_length=30)),
        migrations.AlterField(model_name='bookingrooms', name='total',
                              field=models.CharField(default='null', max_length=30)),
    ]
python
from typing import Protocol


class SupportsStr(Protocol):
    def __str__(self) -> str:
        ...
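# A minimal usage sketch of the protocol above (illustrative only; the helper
# `describe` is hypothetical): any object whose __str__ returns a str satisfies
# SupportsStr structurally, with no explicit inheritance required.
def describe(value: SupportsStr) -> str:
    # Static type checkers accept any argument that implements __str__.
    return str(value)

# describe(42) and describe("text") both type-check, since int and str implement __str__.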
python
import os

import tensorflow as tf
from PIL import Image

cwd = os.getcwd() + '/train/'

for root, dirs, files in os.walk(cwd):
    print(dirs)  # all subdirectories under the current path
    classes = dirs
    break

print(cwd)

writer = tf.python_io.TFRecordWriter("train.tfrecords")

for index, name in enumerate(classes):
    class_path = cwd + name + "/"
    print(class_path)
    for img_name in os.listdir(class_path):
        img_path = class_path + img_name
        img = Image.open(img_path)
        img = img.resize((224, 224))
        if img.mode != 'RGB':
            print(img_path)
        img_raw = img.tobytes()  # convert the image to raw bytes
        example = tf.train.Example(features=tf.train.Features(feature={
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
            'img': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
        }))
        writer.write(example.SerializeToString())  # serialize to string

writer.close()
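# A minimal sketch of how the records written above could be parsed back,
# assuming TensorFlow 1.x (matching the tf.python_io API used here) and that
# every stored image is 224x224 RGB; the function name is hypothetical.
def parse_example(serialized):
    features = tf.parse_single_example(serialized, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'img': tf.FixedLenFeature([], tf.string),
    })
    img = tf.decode_raw(features['img'], tf.uint8)   # raw bytes back to uint8 tensor
    img = tf.reshape(img, [224, 224, 3])             # restore the original image shape
    return img, features['label']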
python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""Summary: Start the scheduler."""

import logging

from resilient_circuits import ResilientComponent, handler
from fn_scheduler.components import SECTION_SCHEDULER
from fn_scheduler.lib.scheduler_helper import ResilientScheduler
from fn_scheduler.lib.resilient_helper import validate_app_config

log = logging.getLogger(__name__)


class FunctionComponent(ResilientComponent):
    """Component that starts the scheduler."""

    def __init__(self, opts):
        """constructor provides access to the configuration options"""
        super(FunctionComponent, self).__init__(opts)
        options = opts.get(SECTION_SCHEDULER, {})

        validate_app_config(options)

        self.timezone = options.get("timezone")
        self.scheduler = ResilientScheduler(options.get("db_url"),
                                            options.get("datastore_dir"),
                                            options.get("thread_max"),
                                            options.get("timezone"))
        log.info("Scheduler started")

    @handler("reload")
    def _reload(self, event, opts):
        """Configuration options have changed, save new values"""
        self.opts = opts
        options = opts.get(SECTION_SCHEDULER, {})
        validate_app_config(options)
        # TODO: restart the scheduler
python
import os

import pandas as pd

from bento.common import datautil, logger, util

logging = logger.fancy_logger(__name__)


def load_covid_raw_data(data_path, base, cases, deaths, nrows=None):
    read_args = {}
    if nrows:
        read_args["nrows"] = nrows
    idf = pd.read_csv(f"{data_path}/{base}/{cases}").drop(["Lat", "Long"], axis=1)
    idf = idf.melt(
        id_vars=["Province/State", "Country/Region"],
        var_name="date",
        value_name="cases",
    )
    idf = idf.groupby(["date", "Country/Region"]).sum().reset_index()

    # Add on deaths
    ddf = pd.read_csv(f"{data_path}/{base}/{deaths}").drop(["Lat", "Long"], axis=1)
    ddf = ddf.melt(
        id_vars=["Province/State", "Country/Region"],
        var_name="date",
        value_name="deaths",
    )
    ddf = ddf.groupby(["date", "Country/Region"]).sum()

    idf = idf.join(ddf, on=["date", "Country/Region"]).rename(
        columns={"Country/Region": "country"}
    )
    idf.loc[:, "date"] = pd.to_datetime(idf["date"])
    idf = idf.sort_values("date")
    return idf


def add_country_reference(raw_df, ref_df):
    # Drop some hard to handle, more obscure areas
    drop_entries = [
        "Diamond Princess",
        "West Bank and Gaza",
        "Kosovo",
        "Holy See",
        "MS Zaandam",
        "Eritrea",
        "Western Sahara",
    ]
    idf = raw_df.copy()
    idf = idf.loc[~idf.country.isin(drop_entries)]

    # Change some unrecognized entries
    modifications = {
        "Burma": ("country", "Myanmar"),
        "US": ("country", "United States"),
        "Korea, South": ("country", "Korea, Republic of"),
    }
    for name, mod in modifications.items():
        idf.loc[idf.country == name, mod[0]] = mod[1]

    reference = tuple(ref_df["country"].unique())
    mismatch = set(idf["country"].unique()) - set(reference)
    for country in mismatch:
        match_name = datautil.fuzzy_search(country, reference)
        logging.debug(f"Missing '{country}', assigning {match_name}")
        idf.loc[idf.country == country, "country"] = match_name
    logging.info(f"Total country name mismatches: {len(mismatch)}")

    idf = idf.join(ref_df.set_index("country"), on="country")
    return idf


def process_covid_data(idf):
    idf["cases_per_100k"] = idf["cases"] * 1e5 / idf["population"]
    idf["deaths_per_100k"] = idf["deaths"] * 1e5 / idf["population"]
    idf = idf.drop(["population"], axis=1)
    return idf


def load(nrows=None):
    data_path = f"{os.environ['APP_HOME']}/{os.environ['DATA_DIR']}"
    base = "jhopkins-covid-19/csse_covid_19_data/csse_covid_19_time_series"
    cases = "time_series_covid19_confirmed_global.csv"
    deaths = "time_series_covid19_deaths_global.csv"
    raw_df = load_covid_raw_data(data_path, base, cases, deaths)
    ref_df = datautil.df_loader("world_country_reference.csv")
    jdf = add_country_reference(raw_df, ref_df)
    pdf = process_covid_data(jdf)
    data = datautil.autostructure(pdf)
    return data
python
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns

_FILE = 'fabs18_detached_award_financial_assistance'


def test_column_headers(database):
    expected_subset = {'row_number', 'business_types', 'uniqueid_AssistanceTransactionUniqueKey'}
    actual = set(query_columns(_FILE, database))
    assert expected_subset == actual


def test_success(database):
    """ BusinessTypes must be one to three letters in length. BusinessTypes values must be
        non-repeated letters from A to X.
    """
    det_award = DetachedAwardFinancialAssistanceFactory(business_types='A', correction_delete_indicatr='')
    det_award_2 = DetachedAwardFinancialAssistanceFactory(business_types='XB', correction_delete_indicatr=None)
    det_award_3 = DetachedAwardFinancialAssistanceFactory(business_types='RCm', correction_delete_indicatr='c')
    det_award_4 = DetachedAwardFinancialAssistanceFactory(business_types='rcm', correction_delete_indicatr='C')
    # Ignore correction delete indicator of D
    det_award_5 = DetachedAwardFinancialAssistanceFactory(business_types='BOB', correction_delete_indicatr='d')

    errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4,
                                                       det_award_5])
    assert errors == 0


def test_failure(database):
    """ BusinessTypes must be one to three letters in length. BusinessTypes values must be
        non-repeated letters from A to X.
    """
    # Test if it's somehow empty or has 4 letters (length test)
    det_award = DetachedAwardFinancialAssistanceFactory(business_types='', correction_delete_indicatr='')
    det_award_2 = DetachedAwardFinancialAssistanceFactory(business_types='ABCD', correction_delete_indicatr='c')

    errors = number_of_errors(_FILE, database, models=[det_award, det_award_2])
    assert errors == 2

    # Test repeats
    det_award = DetachedAwardFinancialAssistanceFactory(business_types='BOb', correction_delete_indicatr='')
    det_award_2 = DetachedAwardFinancialAssistanceFactory(business_types='BOB', correction_delete_indicatr='c')
    det_award_3 = DetachedAwardFinancialAssistanceFactory(business_types='BbO', correction_delete_indicatr='')
    det_award_4 = DetachedAwardFinancialAssistanceFactory(business_types='BB', correction_delete_indicatr='')

    errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4])
    assert errors == 4

    # Test that only valid letters work
    det_award = DetachedAwardFinancialAssistanceFactory(business_types='ABY', correction_delete_indicatr='')
    det_award_2 = DetachedAwardFinancialAssistanceFactory(business_types='C2', correction_delete_indicatr='c')
    det_award_3 = DetachedAwardFinancialAssistanceFactory(business_types='c2d', correction_delete_indicatr='')
    det_award_4 = DetachedAwardFinancialAssistanceFactory(business_types='123', correction_delete_indicatr='')

    errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4])
    assert errors == 4
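# The rule exercised by the tests above can be expressed as a small standalone check.
# This is an illustrative sketch only, not the validator's actual SQL/implementation;
# the function name `is_valid_business_types` is hypothetical.
import re


def is_valid_business_types(value: str) -> bool:
    """Return True if value is one to three non-repeated letters from A to X (case-insensitive)."""
    v = value.upper()
    return bool(re.fullmatch(r'[A-X]{1,3}', v)) and len(set(v)) == len(v)

# Examples: is_valid_business_types('RCm') -> True; is_valid_business_types('BOB') -> False (repeat);
# is_valid_business_types('ABY') -> False (Y is outside A-X).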
python
from rtm.api import validate


def test_validate(rtm_path):
    validate(rtm_path)
python
from sqlalchemy import create_engine from td.client import TDClient from datetime import datetime from td import exceptions from requests.exceptions import ConnectionError import datetime import pandas as pd import sqlite3 import time import credentials print("- Modules imported -") def make_sqlite_table(table_name): engine = create_engine('sqlite:///Options_temp.db', echo=False) table_columns = pd.DataFrame(columns=columns_wanted) table_columns.to_sql(table_name, con=engine) return 0 def add_rows(clean_data, table_name): global file_date engine = create_engine(f'sqlite:///Data/Options_{file_date}.db', echo=False) clean_data.to_sql(table_name, con=engine, if_exists='append', index_label='index') return 0 def delete_row(table_name, column, argument): conn = sqlite3.connect('Options.db') con = conn.cursor() con.execute(f'DELETE FROM {table_name} WHERE {column}={argument}') conn.commit() conn.close() return 0 def delete_db_table(table_name): conn = sqlite3.connect('options.db') con = conn.cursor() con.execute(f'DROP TABLE {table_name}') conn.commit() conn.close() return 0 def show_db_table(puts_calls): conn = sqlite3.connect('options.db') con = conn.cursor() for row in con.execute(f'SELECT * FROM {puts_calls}'): print(row) conn.close() return 0 TDSession = TDClient( client_id=credentials.client_id, redirect_uri='https://127.0.0.1', credentials_path=credentials.json_path # Users/user/.../Project/td_state.json ) TDSession.login() print("- TD connection made -") def human_time(epoch): new_time = datetime.fromtimestamp(int(epoch) / 1000) output = new_time.strftime('%Y-%m-%d %H:%M:%S') return output def get_time_now(): curr_time = time.localtime() curr_clock = time.strftime("%H:%M:%S", curr_time) curr_m = time.strftime('%m') curr_y_d = time.strftime('%d%Y') int_curr_clock = int(f'{curr_clock[:2]}{curr_clock[3:5]}') return int_curr_clock, curr_m, curr_y_d def history(symbol): quotes = TDClient.get_price_history(TDSession, symbol=symbol, period_type='day', period=1, frequency_type='minute', frequency=1, extended_hours=False) # start_date = 1606086000000, end_date = 1606341600000, return quotes cur_weekly = 0 cur_stocks = ['AAPL'] ''' test_quotes_2D = TDClient.get_quotes(TDSession, instruments=['AMD', 'AAPL']) def stats_list(): stats_wanted = ['symbol', 'bidPrice', 'bidSize', 'bidId', 'askPrice', 'askId', 'lastPrice', 'lastSize', 'lastId', 'openPrice', 'highPrice', 'lowPrice', 'bidTick', 'closePrice', 'netChange', 'totalVolume', 'quoteTimeInLong', 'tradeTimeInLong', 'exchange', 'exchangeName', 'volatility', 'regularMarketLastPrice', 'regularMarketNetChange', 'regularMarketTradeTimeInLong', 'netPercentChangeInDouble', 'markChangeInDouble', 'markPercentChangeInDouble', 'regularMarketPercentChangeInDouble'] output_stats = [] for key in test_quotes_2D['AMD'].keys(): for i in stats_wanted: if key == i: output_stats.append(key) return output_stats ''' file_date = 0 trade_days_2021 = {'jan': [4, 5, 6, 7, 8, 11, 12, 13, 14, 15, 19, 20, 21, 22, 25, 26, 27, 28, 29], 'feb': [1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 16, 17, 18, 19, 22, 23, 24, 25, 26], 'mar': [1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 15, 16, 17, 18, 19, 22, 23, 24, 25, 26, 29, 30, 31], 'apr': [5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 19, 20, 21, 22, 23, 26, 27, 28, 29, 30], 'may': [3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 17, 18, 19, 20, 21, 24, 25, 26, 27, 28], 'jun': [1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25, 28, 29, 30], 'jul': [1, 2, 6, 7, 8, 9, 12, 13, 14, 15, 16, 19, 20, 21, 22, 23, 26, 27, 28, 29, 30], 'aug': [2, 3, 4, 5, 6, 9, 10, 11, 12, 
13, 16, 17, 18, 19, 20, 23, 24, 25, 26, 27, 30, 31], 'sep': [1, 2, 3, 7, 8, 9, 10, 13, 14, 15, 16, 17, 20, 21, 22, 23, 24, 27, 28, 29, 30], 'oct': [1, 4, 5, 6, 7, 8, 12, 13, 14, 15, 18, 19, 20, 21, 22, 25, 26, 27, 28, 29], 'nov': [1, 2, 3, 4, 5, 8, 9, 10, 12, 15, 16, 17, 18, 19, 22, 23, 24, 29, 30], 'dec': [1, 2, 3, 6, 7, 8, 9, 10, 13, 14, 15, 16, 17, 20, 21, 22, 27, 28, 29, 30]} opt_column_names = ['putCall', 'symbol', 'description', 'exchangeName', 'bid', 'ask', 'last', 'mark', 'bidSize', 'askSize', 'bidAskSize', 'lastSize', 'highPrice', 'lowPrice', 'openPrice', 'closePrice', 'totalVolume', 'tradeDate', 'tradeTimeInLong', 'quoteTimeInLong', 'netChange', 'volatility', 'delta', 'gamma', 'theta', 'vega', 'rho', 'openInterest', 'timeValue', 'theoreticalOptionValue', 'theoreticalVolatility', 'optionDeliverablesList', 'strikePrice', 'expirationDate', 'daysToExpiration', 'expirationType', 'lastTradingDay', 'multiplier', 'settlementType', 'deliverableNote', 'isIndexOption', 'percentChange', 'markChange', 'markPercentChange', 'mini', 'inTheMoney', 'nonStandard'] columns_unwanted = ['description', 'mark', 'bidSize', 'askSize', 'bidAskSize', 'lastSize', 'tradeDate', 'tradeTimeInLong', 'theoreticalOptionValue', 'optionDeliverablesList', 'expirationType', 'lastTradingDay', 'multiplier', 'settlementType', 'deliverableNote', 'isIndexOption', 'markChange', 'markPercentChange', 'nonStandard', 'inTheMoney', 'mini'] columns_wanted = ['putCall', 'symbol', 'exchangeName', 'bid', 'ask', 'last', 'highPrice', 'lowPrice', 'openPrice', 'closePrice', 'totalVolume', 'quoteTimeInLong', 'netChange', 'volatility', 'delta', 'gamma', 'theta', 'vega', 'rho', 'openInterest', 'timeValue', 'theoreticalVolatility', 'strikePrice', 'expirationDate', 'daysToExpiration', 'percentChange'] stocks = ['AAL', 'AAPL', 'AMD', 'AMZN', 'APA', 'ATVI', 'AXP', 'BABA', 'CME', 'CMG', 'CSCO', 'DAL', 'DIS', 'EA', 'FB', 'GME', 'GOOG', 'GS', 'HD', 'IBM', 'JNJ', 'JPM', 'MCD', 'MSFT', 'MU', 'NEE', 'NFLX', 'NVDA', 'ORCL', 'PEP', 'PYPL', 'QQQ', 'ROKU', 'SBUX', 'SNAP', 'SPY', 'SQ', 'TSLA', 'TWTR', 'ULTA', 'UPS', 'V', 'VXX', 'WMT', 'YUM', 'VDE', 'XLB', 'XLI', 'VCR', 'VDC', 'XLV', 'XLF', 'VGT', 'XLC', 'XLU', 'VNQ'] # This segment was used to sort out unique columns after i hard coded the columns i wanted ''' # print(len(opt_column_names)) # print(len(columns_unwanted)) # print(len(columns_wanted)) # print(len(stocks)) outs = [] def unique_list(n): output = [] for x in n: if x not in output: output.append(x) else: print(x) print(len(output)) return 0 for i in opt_column_names: for j in columns_wanted: if i == j: outs.append(i) print(outs) print(len(outs)) unique_list(outs) ''' trade_stocks = ['AAPL', 'SPY', 'ROKU', 'TSLA', 'GME'] def get_weekly_data(clean): # get data for just the stuff we want to use for r in clean.iterrows(): if r[1][-2] == 'symbol': print(r[1]) if r[0] == 'bid': print(r[1]) print(r[1][2]) return 0 def get_stock(stock): # pass an array of ticker(s) for stock stock_lookup = TDSession.get_quotes(instruments=stock) return stock_lookup def raw_stock(raw): clean_stock_data = [[]] for i in raw.keys(): print(i) return clean_stock_data def pandas_stock_data(arr): pandas_data = [] return pandas_data def get_next_stock(): global pulls global failed_pulls for stock in trade_stocks: error = False try: stock_data = get_stock(stock) except (exceptions.ServerError, exceptions.GeneralError, exceptions.ExdLmtError, ConnectionError): error = True failed_pulls = failed_pulls + 1 print('A server error occurred') if not error: try: clean_stock_data = 
pandas_stock_data(raw_stock(stock_data)) # add_rows(clean_stock_data) UNCOMMENT TO ADD TO STOCKS.DB pulls = pulls + 1 except ValueError: print(ValueError.with_traceback()) print(f'{stock} did not have values for this iteration') failed_pulls = failed_pulls + 1 print(stock) time.sleep(1) return 0 def get_chain(stock): opt_lookup = TDSession.get_options_chain( option_chain={'symbol': stock, 'strikeCount': 50, 'toDate': '2021-4-23'}) return opt_lookup def raw_chain(raw, put_call): cp = f'{put_call}ExpDateMap' clean_data = [[]] r = -1 for k in raw[cp].keys(): # print(k, raw[k], '\n') for strike in raw[cp][k].keys(): # print(strike, raw[k][strike]) for a in raw[cp][k][strike][0].keys(): # if r == -1: # print(raw[cp][k][strike][0].keys()) unit = raw[cp][k][strike][0][a] if unit == put_call.upper(): r = r + 1 if r > 0: clean_data.append([]) clean_data[r].append(unit) return clean_data def pandas_chain(clean): df_cp = pd.DataFrame(clean, columns=opt_column_names) panda_data = df_cp.drop(columns=columns_unwanted) return panda_data pulls = 0 failed_pulls = 0 def get_next_chains(): x = 0 global pulls global failed_pulls global cur_stocks for stock in stocks: error = False try: chain = get_chain(stock) except (exceptions.ServerError, exceptions.GeneralError, exceptions.ExdLmtError, ConnectionError): error = True failed_pulls = failed_pulls + 1 print('A server error occurred') if not error: try: clean = pandas_chain(raw_chain(chain, 'call')) add_rows(clean, 'calls') for s in cur_stocks: if s == stock: get_weekly_data(clean) pulls = pulls + 1 except ValueError: print(ValueError.with_traceback()) print(f'{x}: Calls for {stock} did not have values for this iteration') failed_pulls = failed_pulls + 1 try: get_clean = pandas_chain(raw_chain(chain, 'put')) add_rows(get_clean, 'puts') pulls = pulls + 1 except ValueError: print(f'{x}: Puts for {stock} did not have values for this iteration') failed_pulls = failed_pulls + 1 print(f'{x}: {stock}') x = x + 1 time.sleep(2) return 0 # |SQLite management| # # # make_sqlite_table('calls') # inputs: puts|calls # make_sqlite_table('puts') # inputs: puts|calls # delete_db_table('calls') # delete_db_table('puts') # show_db_table('calls') # show_db_table('puts') # add_rows(clean_chain(raw_chain(get_chain('SPY'), 'put')), 'puts') # raw_chain(,'put|call')), 'puts|calls') # delete_row('puts', '', 1321354652) def main(): global file_date global trade_stocks t, mon, day = get_time_now() mon = list(trade_days_2021.keys())[int(mon) - 1] ''' # uncomment for LIVE while True: if (t < 930) or (t > 1600): print(f'{t}: Market closed {mon}{day}'.upper()) time.sleep(10) else: break ''' # uncomment below line when TESTING on live data file_date = f'temp' # uncomment below line to save and analyze live data # file_date = f'{mon}{day}' pull_count = 0 end_t = 1600 while get_time_now()[0]: # < end_t: insert segment to run LIVE # get_next_stock() get_next_chains() pull_count = pull_count + 1 print(pull_count) print('option market closed') print(f'failed_pulls: {failed_pulls}') print(f'pulls: {pulls}') return 0 main()
python
import sys, os, subprocess, shutil, time BUILDDIR = os.path.abspath("build") NINJA_EXE = "ninja.exe" NINJA_BUILD_FILE = "build/build.ninja" CALL_PATH = os.getcwd() TOOL_PATH = sys.path[0] + "/" TOOLCHAIN_PATH = os.path.dirname(sys.path[0]) NO_EMOJI = False NO_COLOR = False SELECTION = None SECONDARY = None CMAKE_EXTRA = "-DTOOLCHAIN_OFFSET:STRING={} ".format(TOOLCHAIN_PATH) SKIP_PREBUILD = False ONLY_CONFIG = False NEW_BUILD = False NO_NINJA = False class Text: @staticmethod def error(text): return "\033[91m\033[1m\033[4m" + text + "\033[0m" @staticmethod def recoverableError(text): return "\033[31m" + text + "\033[0m" @staticmethod def underline(text): return "\033[4m" + text + "\033[0m" @staticmethod def bold(text): return "\033[1m" + text + "\033[0m" @staticmethod def header(text): return "\033[1m\033[4m" + text + "\033[0m" @staticmethod def warning(text): return "\033[93m\033[1m" + text + "\033[0m" @staticmethod def important(text): return "\033[94m\033[1m" + text + "\033[0m" @staticmethod def reallyImportant(text): return "\033[94m\033[1m\033[4m" + text + "\033[0m" @staticmethod def green(text): return "\033[92m" + text + "\033[0m" @staticmethod def success(text): return "\033[92m\033[1m" + text + "\033[0m" @staticmethod def red(text): return "\033[91m" + text + "\033[0m" @staticmethod def blue(text): return "\033[94m" + text + "\033[0m" @staticmethod def cyan(text): return "\033[96m" + text + "\033[0m" @staticmethod def magenta(text): return "\033[95m" + text + "\033[0m" @staticmethod def gray(text): return "\033[0;90m" + text + "\033[0m" @staticmethod def yellow(text): return "\033[93m" + text + "\033[0m" @staticmethod def darkYellow(text): return "\033[33m" + text + "\033[0m" @staticmethod def darkGreen(text): return "\033[32m" + text + "\033[0m" @staticmethod def darkRed(text): return "\033[31m" + text + "\033[0m" @staticmethod def darkBlue(text): return "\033[34m" + text + "\033[0m" @staticmethod def darkCyan(text): return "\033[36m" + text + "\033[0m" @staticmethod def darkMagenta(text): return "\033[35m" + text + "\033[0m" exitCode = 0 exitError = None def runCommand(cmd: str): global exitCode, exitError print() result = subprocess.run(cmd, shell=True) exitCode = result.returncode exitError = result.stderr return exitCode usageMap = { "Valid options": Text.header("Valid options"), "Valid flags": Text.header("Valid flags"), "Prebuild Script": Text.header("Prebuild Script"), "Example Usage": Text.header("Example Usage"), "build": Text.warning("build"), "upload": Text.warning("upload"), "clean": Text.warning("clean"), "reset": Text.warning("reset"), "config": Text.warning("config"), "disable": Text.warning("disable"), "s": Text.gray("-s"), "com_port": Text.bold(Text.darkCyan("com_port")), "cmake_defs": Text.bold(Text.gray("cmake_defs")), "Pre_Build": Text.magenta("`Pre_Build`"), "bat": Text.cyan("`.bat`"), "ps1": Text.cyan("`.ps1`"), "py": Text.cyan("`.py`"), "Usage": "{} [{}] [{}] [{}]".format( Text.important("config.py"), Text.warning("option"), Text.bold(Text.gray("-s")), Text.bold(Text.gray("cmake_defs")) + "|" + Text.bold(Text.darkCyan("com_port")), ), "exUsage": "{} {} {}".format( Text.important("config.py"), Text.warning("build"), Text.gray("-s -DCUSTOM_BUILD_PATH_PREFIX:STRING=build/Pre_Build/") ), } msg = """ {Usage} {Valid options} {clean} \t: Cleanup build files {build}\t[{cmake_defs}]\t: Build project, configuring if necessary {upload}\t[{com_port}]\t: Upload binary file to a connected teensy {disable}\t[{com_port}]\t: Put a connected teensy into programming mode 
{reset}\t[{cmake_defs}]\t: Refresh project to a clean configured state {config}\t[{cmake_defs}]\t: Reconfigure cmake project, can pass \t extra defines {cmake_defs} for cmake {Valid flags} {s} \t: Skip any {Pre_Build} script that exists {Prebuild Script} If a script is named {Pre_Build} and is at the root of a project it will be run before configuring CMake It can be a {bat}, {ps1}, or {py} Only one is run, prefering the file type is that order {Example Usage} {exUsage} """.format_map( usageMap ) def usage(): print(msg) sys.exit() def endScript(errMsg: str = None): global exitCode, exitError if exitCode != 0 or errMsg: if errMsg: print(errMsg) if exitError: print() print(bytes.decode(exitError)) print(Text.error("\nTask Failed ❌")) sys.exit(1) else: print(Text.success("\nTask Succeeded ✔")) sys.exit() TEENSY_CORE_PREFIX = "TEENSY_CORE_NAME:INTERNAL=" FINAL_OUTPUT_FILE_PREFIX = "FINAL_OUTPUT_FILE:INTERNAL=" TEENSY_CORE_NAME = None FINAL_OUTPUT_FILE = None def populateCMAKEVars(): global TEENSY_CORE_NAME, FINAL_OUTPUT_FILE with open(BUILDDIR + "\\CMakeCache.txt", "r") as f: for line in f: if line.find(FINAL_OUTPUT_FILE_PREFIX) != -1: FINAL_OUTPUT_FILE = line.removeprefix(FINAL_OUTPUT_FILE_PREFIX).rstrip() elif line.find(TEENSY_CORE_PREFIX) != -1: TEENSY_CORE_NAME = line.removeprefix(TEENSY_CORE_PREFIX).rstrip() def compile(): global FINAL_OUTPUT_FILE print(Text.reallyImportant("\nBuilding ⏳")) if runCommand("cd build && " + TOOL_PATH + NINJA_EXE + " -j16") != 0: endScript(Text.error("Ninja failed to build ⛔")) print(Text.success("\nBuild Finished 🏁")) populateCMAKEVars() if not FINAL_OUTPUT_FILE: endScript(Text.error("Final binary file was not found ⛔")) else: print(Text.important("Ready to Upload 🔌")) endScript() def preBuild(): if SKIP_PREBUILD: print(Text.warning("Skipping Pre_Build script")) else: code = None if os.path.isfile("Pre_Build.bat"): code = runCommand("Pre_Build.bat") elif os.path.isfile("Pre_Build.ps1"): code = runCommand("Pre_Build.ps1") elif os.path.isfile("Pre_Build.py"): code = runCommand("Pre_Build.py") else: return if code != 0: endScript(Text.error("Pre_Build script failed ⛔")) def build(): print(Text.header("Build Project")) if NO_NINJA: fullClean() config() compile() def disable(): runCommand(TOOL_PATH + "ComMonitor.exe {} 134 -c --priority".format(SECONDARY)) def upload(): print(Text.header("Upload Binary ⚡")) populateCMAKEVars() if not FINAL_OUTPUT_FILE: endScript(Text.error("Final binary file was not found ⛔")) elif not SECONDARY: print(Text.warning("Warning! no port defined, unable to auto reboot ⚠")) else: disable() time.sleep(1.5) tries = 1 while True: if runCommand(TOOL_PATH + "teensy_loader_cli.exe -mmcu={} -v {}".format(TEENSY_CORE_NAME, FINAL_OUTPUT_FILE)) == 0: print(Text.success("\nGood to go ✔")) endScript() elif tries == 0: break else: print(Text.recoverableError("Failed to upload once ✖")) tries -= 1 endScript(Text.error("Failed to upload")) def config(): print(Text.header("Configure Project")) preBuild() print(Text.bold("Configuring CMake project ⚙")) if runCommand("cd build && cmake .. 
-G Ninja {}".format(CMAKE_EXTRA)) != 0: endScript(Text.error("\nFailed to configure cmake")) elif ONLY_CONFIG: endScript() def clean(): if NO_NINJA: print(Text.error("Project is invalid")) endScript(Text.recoverableError("Consider running config or reset")) print(Text.important("Cleaning 🧹")) if runCommand("cd build && " + TOOL_PATH + NINJA_EXE + " clean") != 0: endScript(Text.error("Error cleaning up build files")) def fullClean(): shutil.rmtree(BUILDDIR) os.mkdir(BUILDDIR) def reset(): global ONLY_CONFIG print(Text.red("Resetting Project")) ONLY_CONFIG = True if not NEW_BUILD: print(Text.important("Hard Cleaning 🧼🧽")) fullClean() config() # Begin Script if len(sys.argv) < 2: usage() SELECTION = sys.argv[1].strip(" '\"").upper() if len(sys.argv) > 2: SECONDARY = sys.argv[2].strip(" '\"").upper() SKIP_PREBUILD = SECONDARY == "-S" if SKIP_PREBUILD: CMAKE_EXTRA += " ".join(sys.argv[3:]) else: CMAKE_EXTRA += " ".join(sys.argv[2:]) if not os.path.isdir(BUILDDIR): os.mkdir(BUILDDIR) NEW_BUILD = True NO_NINJA = not os.path.isfile(NINJA_BUILD_FILE) print() if SELECTION == "BUILD": build() elif SELECTION == "UPLOAD": upload() elif SELECTION == "CONFIG": ONLY_CONFIG = True config() elif SELECTION == "CLEAN": clean() elif SELECTION == "RESET": reset() elif SELECTION == "DISABLE": disable() endScript()
python
#!/usr/bin/python
"""
This plugin implements identifying the modbusRTU protocol for serial2pcap.

Modbus RTU Frame Format:
Name      Length (bits)  Function
Start     28             At least 3.5 (28 bits) character times of silence
Address   8
Function  8
Data      n*8
CRC       16
End       28             At least 3.5 (28 bits) character times of silence between frames

This plugin identifies Modbus RTU frames by matching data to CRCs. The plugin forward-slices through the
received data (up to 256 bytes - the maximum RTU ADU size), computes the CRC of the data seen so far, and
compares it to the next two bytes. If a CRC match is found then the plugin assumes that it has found a
valid RTU frame.
"""

from PluginCore import PluginCore
from ctypes import c_ushort


class ModbusRTU(PluginCore):

    ProtocolName = "modbusRTU"
    ProtocolDescription = "Modbus RTU Frame Format Serial Protocol"

    crc16_tab = []
    crc16_constant = 0xA001

    def __init__(self):
        if not len(self.crc16_tab):
            self.init_crc16()

    # CRC code derived and modified from PyCRC - Github cristianav/PyCRC - GPLv3 license
    # https://github.com/cristianav/PyCRC/blob/master/PyCRC/CRC16.py
    def calculate(self, input_data):
        is_string = isinstance(input_data, str)
        is_bytes = isinstance(input_data, (bytes, bytearray))

        # if not is_string and not is_bytes:
        #     raise Exception("input data type is not supported")

        crc_value = 0xFFFF

        for c in input_data:
            d = ord(c)
            tmp = crc_value ^ d
            rotated = crc_value >> 8
            crc_value = rotated ^ self.crc16_tab[(tmp & 0x00ff)]

        # added this to rotate the bytes. RTU transmits CRC in a different endian
        crc_low = crc_value & 255
        crc_high = crc_value >> 8

        return (crc_low << 8) ^ crc_high

    def init_crc16(self):
        for i in range(0, 256):
            crc = c_ushort(i).value
            for j in range(0, 8):
                if crc & 0x0001:
                    crc = c_ushort(crc >> 1).value ^ self.crc16_constant
                else:
                    crc = c_ushort(crc >> 1).value
            self.crc16_tab.append(crc)
    # end derived code

    def Identify(self, data, capture_info):
        # sizes do not include the 2 byte checksum
        LOWER_SLICE_LIMIT = 6    # min Modbus RTU size 8
        UPPER_SLICE_LIMIT = 254  # max Modbus RTU size 256

        # if there is not enough data then wait
        if len(data) <= LOWER_SLICE_LIMIT:
            return (PluginCore.Status.TOOSHORT, 0)

        sliceat = LOWER_SLICE_LIMIT

        while sliceat <= UPPER_SLICE_LIMIT:
            # make sure there is enough data
            if len(data) < sliceat + 2:
                return (PluginCore.Status.TOOSHORT, 0)

            # calculate the CRC at the slice
            calc_crc = self.calculate(data[:sliceat])

            # get the received CRC from the data
            recv_crc = (ord(data[sliceat]) << 8) ^ ord(data[sliceat + 1])

            # if the calculated and received CRCs match, assume a good packet
            if calc_crc == recv_crc:
                return (PluginCore.Status.OK, sliceat + 2)

            sliceat += 1

        # if no packet was found then signal unknown
        return (PluginCore.Status.UNKNOWN, 0)
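# A minimal usage sketch (not part of the plugin), exercising the forward-slicing CRC
# check above. It assumes serial2pcap hands the plugin raw bytes as a string of
# characters, which is what the ord() calls in Identify() imply.

plugin = ModbusRTU()

# slave 1, function 3, plus padding so the ADU reaches the minimum sliceable size
payload = '\x01\x03\x00\x00\x00\x02'
crc = plugin.calculate(payload)

# append the CRC in the same byte order that recv_crc is reconstructed with above
frame = payload + chr(crc >> 8) + chr(crc & 0xFF)

status, length = plugin.Identify(frame, capture_info=None)
# expected: status == PluginCore.Status.OK and length == len(frame) == 8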
python
import sys import DiveConstants as dc from rpy2.rinterface import NA from rpy2.robjects.vectors import IntVector, FloatVector, StrVector import rpy2.robjects.packages as rpackages import rpy2.robjects as robjects import numpy as np np.set_printoptions(suppress=True) utils = rpackages.importr('utils') scuba = rpackages.importr('scuba') def max_ascent(dive): """ finds the maximum ascent rate :param dive: dataframe: a dataframe containing columns: time and depth :return: float: the maximum ascent rate """ max = 0 # finds maximum positive difference between each time interval for i in range(len(dive[1])): try: temp = dive[1][i+1] if (dive[1][i] - temp) > max: max = dive[1][i] - temp except IndexError: pass return round(max/10, 3) def compartment_pressures(data, halftime_set): """ Gets compartment pressures from dive profile based on given half time set. :param data: dataframe: a dataframe containing columns: time and depth :param halftime_set: str: the name of the halftime set to be used :return: cp a dataframe containing compartment pressures from 1,1b - 16 """ # setup R functions dive = robjects.r['dive'] haldane = robjects.r['haldane'] pickmodel = robjects.r['pickmodel'] data_frame = robjects.r['data.frame'] nitrox = robjects.r['nitrox'] dive_profile = dive(data, gas=nitrox(0.21)) # check if halftime_set is one of the allowed halftime sets, raise exception if not. if(not(halftime_set == 'ZH-L16A' or halftime_set == 'Haldane' or halftime_set == 'DSAT' or halftime_set == 'Workman65' or halftime_set == 'Buzzacott')): raise ValueError('Invalid halftime-set') else: # if halftime set is decimate, set up decimate model. if(halftime_set == 'Buzzacott'): hm = robjects.r['hm'] decimate_model = hm(HalfT=IntVector((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)), M0=IntVector(( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)), dM=IntVector((1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))) cp = haldane(dive_profile, model=decimate_model, progressive=True) # for all other models, set up normally else: cp = haldane(dive_profile, model=pickmodel( halftime_set), progressive=True) # return the compartment pressures as dataframe return data_frame(cp) def max_values(ambient_pressures, compartment_pressures, totalIPP): """ merges max_bubble, max_inspired into a single function :param ambient_pressures: float[]: a list of ambient pressures at each time point :param compartment_pressures: float[]: a list of compartment pressure values :param totalIPP: float[]: the total inert gas partial pressure at given time points :return: float[]: max_values : array containing 4 collumns: maxins, maxbub, the cp where maxbub occured, and surf the cp when the diver surfaces. 
""" # get compartment pressures and ambient pressure data cp = compartment_pressures ap = ambient_pressures # initialize output array, array is same length as comparment pressures max_values = np.zeros((len(cp), 5)) for i in range(len(cp)): maxbub = 0 maxins = -sys.maxsize n2cp = 0 hecp = 0 # find the maximum positive difference of inert gas against ambient pressure (pressure @ compartment - ambient pressure @ that depth) # find the maximum positive difference of inert gas inside each compartment for j in range(len(cp[i])): try: # nparr does [row,col] # dataframe does [col][row] tempbub = (cp[i][j] - ap[j, 1]) # cp[i][j] tempins = (cp[i][j] - totalIPP[j]) if(tempbub > maxbub): maxbub = tempbub n2cp = cp[i][j] if(len(cp)>17): hecp = cp[i+17][j] if(tempins > maxins): maxins = tempins except IndexError: pass max_values[i][0] = maxins max_values[i][1] = maxbub max_values[i][2] = n2cp max_values[i][3] = hecp max_values[i][4] = cp[i][len(cp[i])-1] return max_values # TODO: allow this to take in raw csv or a dataframe def ambient_pressures(dive_csv): """ calculates ambient pressures :param dive_csv: dataframe: a dataframe containing columns: time and depth :return: float[]: a list of ambient pressures at each time point """ # R function setup data_frame = robjects.r['data.frame'] # get dive data (times/depths) df = data_frame(dive_csv) # initialize output array ap = np.zeros((len(df[0]), len(df))) for i in range(len(df[0])): # nparr does [row,col] # dataframe does [col][row] ap[i, 0] = df[0][i] ap[i, 1] = df[1][i]/10 + 1 return ap def max_inspired(compartment_pressures, totalIPP): """ calculates the maximum positive difference between the inert gas pressure inside each compartment (1-17, but it should be 1-16 with both 1 and 1b included) and the partial pressure of inert gas in the breathing mixture at each respective time and depth. 
:param: compartment_pressures: float[]: a list of compartment pressure values :param totalIPP: float[]: the total inert gas partial pressure at given time points :return: float[]: the maximum inspired difference for each compartment A list containing the maximum positive differences of inert gas against totalIPP (pressure @ compartment - totalIPP @ that depth) """ # get compartment pressures and ambient pressure data cp = compartment_pressures # initialize output array, array is same length as comparment pressures maxins = np.zeros(len(cp)) for i in range(len(cp)): max = -sys.maxsize # find the maximum positive difference of inert gas against totalIPP (pressure @ compartment - totalIPP @ that depth) for j in range(len(cp[i])): try: # nparr does [row,col] # dataframe does [col][row] tempmax = (cp[i][j] - totalIPP[j]) # cp[i][j] if(tempmax > max): max = tempmax maxins[i] = max except IndexError: pass return maxins def max_bubble(ambient_pressures, compartment_pressures): """ calculates the maximum positive difference between the inert gas pressure inside each compartment (1-17, but it should be 1-16 with both 1 and 1b included) :param ambient_pressures: float[]: a list of ambient pressures at each time point :param compartment_pressures: float[]: a list of compartment pressure values :return: float[]: the maximum bubble difference for each compartment """ # get compartment pressures and ambient pressure data cp = compartment_pressures ap = ambient_pressures # initialize output array, array is same length as comparment pressures maxbubs = np.zeros((len(cp), 2)) for i in range(len(cp)): max = -sys.maxsize n2cp = 0 # find the maximum positive difference of inert gas against ambient pressure (pressure @ compartment - ambient pressure @ that depth)cls for j in range(len(cp[i])): try: # nparr does [row,col] # dataframe does [col][row] tempbub = (cp[i][j] - ap[j, 1]) # cp[i][j] if(tempbub > max): max = tempbub n2cp = cp[i][j] maxbubs[i][0] = max maxbubs[i][1] = n2cp except IndexError: pass return maxbubs # TODO: having dive might be redundant if compartment pressures can be used? # TODO: Find out how to combine the nitrogen m values with helium m values - when helium and nitrogen is in gas mixture def gradient_factors(dive, gases, compartment_pressures): """ calculates the maximum percentage of the respective M-value any compartment reaches otherwise known as the gradient factor. Below values are harcoded from Erik C. Baker's “Understanding M-values” from tables 2 & 4 :param dive: dataframe: a dataframe containing columns: time and depth :param gasses: str: TODO: this will be a list later? :param compartment_pressures: dataframe containing compartment pressure values :return: float[]: list of gradient factor values """ cp = compartment_pressures # nitrogen delta slope values in order [1, 1b, 2, ... 16] n_delta = dc.N_DELTA # nitogen surfacing m-value in order [1, 1b, 2, ... 16] n_m_naught = dc.N_M_NAUGHT # helium delta slope values in order [1, 1b, 2, ... 16] he_delta = dc.HE_DELTA # helium surfacing m-value in order [1, 1b, 2, ... 
16] he_m_naught = dc.HE_M_NAUGHT gaugeP = np.zeros(len(dive[0])) # nitrogen and helium XDM, calculation = (the respective gas * gauge pressure at each timepoint) nXDM = np.zeros((len(gaugeP), 17)) heXDM = np.zeros((len(gaugeP), 17)) # nitrogen and helium respective m values n_mvalues = np.zeros((len(nXDM), 17)) he_mvalues = np.zeros((len(heXDM), 17)) # if a dive has both nitrogen and helium then we need to combine the m values using a weighting total_mvalues = np.zeros((len(nXDM), 17)) GFs = np.zeros((len(n_mvalues), 17)) maxGF = np.zeros(len(gaugeP)) for i in range(len(gaugeP)): gaugeP[i] = dive[1][i]/10 for j in range(17): nXDM[i][j] = gaugeP[i] * n_delta[j] heXDM[i][j] = gaugeP[i] * he_delta[j] n_mvalues[i][j] = (n_m_naught[j]/10) + nXDM[i][j] he_mvalues[i][j] = (he_m_naught[j]/10) + heXDM[i][j] GFs[i][j] = (cp[j][i] / n_mvalues[i][j]) * 100 maxGF[i] = round(np.max(GFs[i])) ''' print("\ngaugeP") print(gaugeP) print("\nnXDM") print(nXDM) print("\nheXDM") print(heXDM) print("\n n_mvalues") print(n_mvalues) print("\n gradient factors") print(GFs) print("\nmax GF") print(maxGF) ''' def helium_inert_pressure(ambient_pressures, gases): """ calculate inert gas partial pressure of helium at each time point :param ambient_pressures: float[]: a list of ambient pressures at each time point :param gasses: str: TODO: this will be a list later? :return: float[]: the inert gas partial pressure of helium at each time point """ # this will need to be changed later to get the actual value of helium helium = dc.HELIUM ap = ambient_pressures heIPP = np.zeros(len(ap)) for i in range(len(ap)): heIPP[i] = ap[i, 1] * helium return heIPP def nitrogen_inert_pressure(ambient_pressures, gases): """ calculate inert gas partial pressure of nitrogen at each time point :param ambient_pressures: float[]: a list of ambient pressures at each time point :param gasses: str: TODO: this will be a list later? :return: float[]: the inert gas partial pressure of nitrogen at each time point """ nitrogen = dc.NITROGEN ap = ambient_pressures nIPP = np.zeros(len(ap)) for i in range(len(ap)): nIPP[i] = ap[i, 1] * nitrogen return nIPP def totalIPP(nIPP, heIPP): """ calculate the total inert gas partial pressure :param niPP: float[]: the inert gas partial pressure of nitrogen at a given time points :param heIPP: float[]: the inert gas partial pressure of helium at a given time points :return: float[]: the total inert gas partial pressure at given time points """ total_IPP = np.zeros(len(nIPP)) for i in range(len(nIPP)): total_IPP[i] = nIPP[i] + heIPP[i] return total_IPP
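# A minimal usage sketch (assumed, not part of the original module) combining the
# partial-pressure helpers above on a toy profile. DiveConstants is assumed to define
# NITROGEN and HELIUM as breathing-gas fractions (e.g. 0.79 and 0.0 for air).

def _ipp_demo():
    times = [0, 1, 2, 3]
    depths = [0, 10, 20, 10]
    # build ambient pressures the same way ambient_pressures() does:
    # column 0 is time, column 1 is depth/10 + 1 (absolute pressure in bar)
    ap = np.zeros((len(times), 2))
    for i, (t, d) in enumerate(zip(times, depths)):
        ap[i, 0] = t
        ap[i, 1] = d / 10 + 1

    n_ipp = nitrogen_inert_pressure(ap, gases=None)   # gases is currently unused
    he_ipp = helium_inert_pressure(ap, gases=None)
    print(totalIPP(n_ipp, he_ipp))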
python
from collections import Counter from random import randint from django.http import JsonResponse from django.shortcuts import render from django.views.generic import View, TemplateView from .models import Article, portals, languages from utils.utils import parse_a_website BENCHMARK_URL = 'https://www.benchmark.pl/' BGG_URL = 'https://boardgamegeek.com/blog/1/boardgamegeek-news' ZWIAD_HISTORII_URL = 'https://www.zwiadowcahistorii.pl/' TOJUZBYLO_URL = 'https://tojuzbylo.pl/aktualnosci' COMPUTER_WORLD_WEB_URL = 'https://www.computerworld.pl/' PYTHON_WEB_URL = 'https://www.infoworld.com/uk/category/python/' REAL_PYTHON_WEB_URL = 'https://realpython.com/' BUSHCRAFTABLE_URL = 'https://bushcraftable.com/' class HomeView(TemplateView): template_name = 'homepage.html' class StatisticsView(View): def get(self, request): return render(self.request, 'statistics.html') def get_all_article_pie_chart_data(self): all_articles = list(Article.objects.all().values_list('portal', flat=True)) articles = Counter(all_articles) colors = [] for color in range(len(articles)): color = '#%06x' % randint(0, 0xFFFFFF) colors.append(color) context = { 'labels': list(articles.keys()), 'data': list(articles.values()), 'colors': colors, } return JsonResponse(data=context) def get_all_article_tab_chart_data(self): all_articles = list(Article.objects.all().values_list('portal', flat=True)) articles = Counter(all_articles) sorted_articles = dict(sorted(articles.items(), key=lambda item: item[1], reverse=True)) colors = [] for color in range(len(articles)): color = '#%06x' % randint(0, 0xFFFFFF) colors.append(color) context = { 'labels': list(sorted_articles.keys()), 'data': list(sorted_articles.values()), 'colors': colors, } return JsonResponse(data=context) def get_top_en_word_chart_data(self): all_titles = list(Article.objects.filter(language='ENG').values_list('title', flat=True)) top_words = [] for title in all_titles: split_title = title.split(' ') for word in split_title: if len(word) > 3: top_words.append(word.lower()) count_top_words = Counter(top_words) sorted_words = dict(sorted(count_top_words.items(), key=lambda item: item[1], reverse=True)) colors = [] for color in range(10): color = '#%06x' % randint(0, 0xFFFFFF) colors.append(color) context = { 'labels': list(sorted_words.keys())[:10], 'data': list(sorted_words.values())[:10], 'colors': colors, } return JsonResponse(data=context) def get_top_pl_word_chart_data(self): all_titles = list(Article.objects.filter(language='PL').values_list('title', flat=True)) top_words = [] for title in all_titles: split_title = title.split(' ') for word in split_title: if len(word) > 3: top_words.append(word.lower()) count_top_words = Counter(top_words) sorted_words = dict(sorted(count_top_words.items(), key=lambda item: item[1], reverse=True)) colors = [] for color in range(10): color = '#%06x' % randint(0, 0xFFFFFF) colors.append(color) context = { 'labels': list(sorted_words.keys())[:10], 'data': list(sorted_words.values())[:10], 'colors': colors, } return JsonResponse(data=context) class BenchmarkView(View): def get(self, *args, **kwargs): soup = parse_a_website(BENCHMARK_URL) # Getting data from soup data = [] sections = soup.find_all('section') section_3 = sections[3] section_3_divs = section_3.find_all('div') for div in section_3_divs[1:2]: benchmark_li = div.find_all('li') for li in benchmark_li: title = (li.find('a').text).split('\t\t\t')[1].split('\n')[0] url = f"http://benchmark.pl{li.find('a')['href']}" data.append((url, title)) # Creating Article 
Article.check_if_article_already_exist(data, portals[0][0], languages[0][1]) # Check if data not empty if len(data) == 0: context = {'data': [('#', 'No data to view. Contact with administrator.')]} return render(self.request, 'benchmark.html', context) context = { 'data': data, } return render(self.request, 'benchmark.html', context) class BoardGamesGeekView(View): def get(self, *args, **kwargs): soup = parse_a_website(BGG_URL) # Getting data from soup data = [] posts = soup.find_all("h3", {"class": 'post_title'}) for post in posts: title = post.find('a').text url = f"https://boardgamegeek.com{post.find('a')['href']}" data.append((url, title)) # Creating Article Article.check_if_article_already_exist(data, portals[1][1], languages[1][1]) # Check if data not empty if len(data) == 0: context = {'data': [('#', 'No data to view. Contact with administrator.')]} return render(self.request, 'bgg.html', context) context = { 'data': data, } return render(self.request, 'bgg.html', context,) class ArcheologyView(View): def get(self, *args, **kwargs): soup = parse_a_website(ZWIAD_HISTORII_URL) # Getting data from soup data = [] divs_1 = soup.find_all("div", {"class": 'td_module_1 td_module_wrap td-animation-stack'}) for div in divs_1: divs_2 = div.find_all('div', {'class': 'td-module-thumb'}) for element in divs_2: title = element.find('a')['title'] url = element.find('a')['href'] img = element.find('img')['data-img-url'] data.append((url, title, img)) # Creating Article Article.check_if_article_already_exist(data, portals[3][1], languages[0][1]) if len(data) == 0: context = {'data': [('#', 'No data to view. Contact with administrator.')]} return render(self.request, 'archeology.html', context) context = { 'data': data, } return render(self.request, 'archeology.html', context) class ToJuzByloView(View): def get(self, *args, **kwargs): soup = parse_a_website(TOJUZBYLO_URL) # Getting data from soup data = [] tds = soup.find_all('td', {'class': 'col-1 col-first'}) for td in tds: title = (td.find('h2', {'class': 'tytul'}).text).split('\n')[1] img = td.find('img')['src'] href = td.find_all('a')[1]['href'] url = f"https://tojuzbylo.pl/{href}" data.append((url, title, img)) # Creating Article Article.check_if_article_already_exist(data, portals[2][1], languages[0][1]) if len(data) == 0: context = {'data': [('#', 'No data to view. Contact with administrator.')]} return render(self.request, 'tojuzbylo.html', context) context = { 'data': data, } return render(self.request, 'tojuzbylo.html', context,) class ComputerWorldView(View): def get(self, *args, **kwargs): soup = parse_a_website(COMPUTER_WORLD_WEB_URL) # Getting data from soup data = [] main_div = soup.find('div', {'class': 'left-side'}) divs = main_div.find_all('div', {'class': 'row-item-icon'}) for div in divs: img = div.find('img', {'class': 'img-fluid'})['src'] url = f"https://www.computerworld.pl{div.find('a')['href']}" title = div.find('a')['href'].split(',')[0].split('/')[2].replace('-', ' ') data.append((url, title, img)) # Creating Article Article.check_if_article_already_exist(data, portals[4][1], languages[0][1]) if len(data) == 0: context = {'data': [('#', 'No data to view. 
Contact with administrator.')]} return render(self.request, 'computer_world_news.html', context) context = { 'data': data, } return render(self.request, 'computer_world_news.html', context,) class PythonView(View): def get(self, *args, **kwargs): soup = parse_a_website(PYTHON_WEB_URL) # Getting data from soup data = [] divs = soup.find_all('div', {'class': 'post-cont'}) figs = soup.find_all('figure', {'class': 'well-img'}) for div, figure in zip(divs, figs): title = div.find('a').text url = f"https://www.infoworld.com{div.find('a')['href']}" img = figure.find('img')['data-original'] data.append((url, title, img)) # Creating Article Article.check_if_article_already_exist(data, portals[5][1], languages[1][1]) if len(data) == 0: context = {'data': [('#', 'No data to view. Contact with administrator.')]} return render(self.request, 'python.html', context) context = { 'data': data, } return render(self.request, 'python.html', context) class RealPythonView(View): def get(self, *args, **kwargs): soup = parse_a_website(REAL_PYTHON_WEB_URL) # Getting data from soup data = [] posts = soup.find_all('div', {'class': 'card border-0'}) for post in posts: a_tags = post.find_all('a')[0] title = a_tags.find('img')['alt'] img = a_tags.find('img')['src'] url = f"https://realpython.com{a_tags['href']}" data.append((url, title, img)) # Creating Article Article.check_if_article_already_exist(data, portals[6][1], languages[1][1]) if len(data) == 0: context = {'data': [('#', 'No data to view. Contact with administrator.')]} return render(self.request, 'real_python.html', context) context = { 'data': data, } return render(self.request, 'real_python.html', context) class BushcraftableView(View): def get(self, *args, **kwargs): soup = parse_a_website(BUSHCRAFTABLE_URL) # Getting data from soup data = [] post_headers = soup.find_all('h2', {'class': 'entry-title'}) post_images = soup.find_all('div', {'class': 'post-image'}) for header, image in zip(post_headers, post_images): url = header.find('a')['href'] title = header.find('a').text img = image.find('img')['src'] data.append((url, title, img)) # Creating Article Article.check_if_article_already_exist(data, portals[7][1], languages[1][1]) if len(data) == 0: context = {'data': [('#', 'No data to view. Contact with administrator.')]} return render(self.request, 'bushcraftable.html', context) context = { 'data': data, } return render(self.request, 'bushcraftable.html', context) # soup.find_all(lambda tag: tag.name == 'p' and 'et' in tag.text) # https://www.livescience.com/news # TODO: Widok statystyk. Obliczenie ilości artykułów i piechart na widoku statystycznym, # TODO: Settingsy porownac do django projektu KWL/Inforshare i pozmieniać. # detect language - https://pypi.org/project/langdetect/
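# A rough sketch (assumption, not the project's actual helper): parse_a_website is
# imported from utils.utils above but not shown in this file. It presumably fetches a
# page and returns a BeautifulSoup object, roughly like this:

import requests
from bs4 import BeautifulSoup

def parse_a_website(url):
    response = requests.get(url)
    return BeautifulSoup(response.content, 'html.parser')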
python
'''utils and constants functions used by the selector and selectors class'''
import re

RE_ALPHA = re.compile(r'\w')
SELECTOR_TYPE = {'XML': 'xml', 'TRXML': 'trxml'}
TRXML_SELECTOR_TYPE = {'SINGLETON': 'singleton', 'MULTIPLE': 'multiple'}


def valid_field_name(tag_name: str = '') -> bool:
    '''
    simple validation function:

    params:
        - tag_name: string

    output:
        - True/False
    '''
    # need to contain at least one alphabet char
    if RE_ALPHA.search(tag_name) is None:
        raise ValueError(
            f"tag_name '{tag_name}' needs at least one alphabet char")
    return True


def _selector_target_type(selector) -> str:
    if "." in selector.text:
        selector_type = SELECTOR_TYPE['TRXML']
    else:
        selector_type = SELECTOR_TYPE['XML']
    return selector_type


def _selector_singleton_type(selector) -> bool:
    item_index = selector.item_index
    if item_index.isdigit():
        selector_type = TRXML_SELECTOR_TYPE['SINGLETON']
    else:
        selector_type = TRXML_SELECTOR_TYPE['MULTIPLE']
    return selector_type


def _selector_same_itemgroup(selector) -> str:
    return selector.itemgroup_name


def selector_attribute(selectors, attribute_name) -> str:
    '''
    fetch the selector attribute, and check the consistency of all selectors

    params:
        - selectors: a list of selector objects
        - attribute_name: name of the attribute

    output:
        - attribute_value: string
    '''
    if attribute_name == 'selector_type':
        result = _selector_attribute_checking(selectors, _selector_target_type)
    elif attribute_name == 'trxml_selector_type':
        result = _selector_attribute_checking(selectors, _selector_singleton_type)
    elif attribute_name == 'same_itemgroup':
        result = _selector_attribute_checking(selectors, _selector_same_itemgroup)
    else:
        raise ValueError(
            f"selector attribute type '{attribute_name}' unknown"
        )
    return result


def _selector_attribute_checking(selectors, attrib_func):
    first_attrib = None
    for selector in selectors:
        if first_attrib is None:
            first_attrib = attrib_func(selector)
        elif first_attrib != attrib_func(selector):
            raise ValueError(
                f"""selector '{selector.text}' seems to have a different type than the others,
                e.g.,
                  - xml v.s. trxml,
                  - or singleton v.s. multi-item,
                  - or a different itemgroup for multi-item selectors.
                Please check!
                """
            )
    return first_attrib
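# A minimal usage sketch: the Selector namedtuple below is a hypothetical stand-in that
# only mimics the attributes these helpers read (text, item_index, itemgroup_name); the
# project's real selector class is not shown here.

from collections import namedtuple

Selector = namedtuple('Selector', ['text', 'item_index', 'itemgroup_name'])

valid_field_name('title')   # True; raises ValueError for a name like '---'

selectors = [
    Selector(text='vacancy.0.title', item_index='0', itemgroup_name='vacancy'),
    Selector(text='vacancy.0.city', item_index='0', itemgroup_name='vacancy'),
]

print(selector_attribute(selectors, 'selector_type'))        # 'trxml'
print(selector_attribute(selectors, 'trxml_selector_type'))  # 'singleton'
print(selector_attribute(selectors, 'same_itemgroup'))       # 'vacancy'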
python
import matplotlib.pyplot as plt import random import numpy as np import cv2 def visualize(img, det_boxes=None, gt_boxes=None, keypoints=None, is_show_label=True, show_cls_label = True, show_skeleton_labels=False, classes=None, thresh=0.5, name='detection', return_img=False): if is_show_label: if classes == 'voc': classes = [ '__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] elif classes == 'coco': classes = [ "__background__", "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket","bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush" ] color_map = [(0, 0, 0), (0, 255, 0), (255, 128, 0), (255, 255, 0), (255, 0, 255), (255, 128, 255), (128, 255, 128), (128, 255, 255), (255, 255, 128), (0, 128, 255), (0, 255, 128), (255, 0, 128), (0, 215, 255), (255, 0, 255), (255, 128, 0), (128, 128, 255), (0, 255, 255), (0, 69, 255), (0, 69, 255), (255, 204, 204), (204, 255, 255)] im = np.array(img).copy().astype(np.uint8) colors = dict() font = cv2.FONT_HERSHEY_SIMPLEX if det_boxes is not None: det_boxes = np.array(det_boxes) for det in det_boxes: bb = det[:4].astype(int) if is_show_label: if show_cls_label: cls_id = int(det[4]) if cls_id == 0: continue if len(det) > 4: score = det[-1] else: score = 1. 
if thresh < score: if show_cls_label: if cls_id not in colors: colors[cls_id] = (random.random() * 128 + 128, random.random() * 128 + 128, random.random() * 128 + 128) cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]), colors[cls_id], 1) if classes and len(classes) > cls_id: cls_name = classes[cls_id] else: cls_name = str(cls_id) cv2.putText(im, '{:s} {:.3f}'.format(cls_name, score), (bb[0], bb[1] - 2), font, 0.7, colors[cls_id], 2) else: cv2.putText(im, '{:.3f}'.format(score), (bb[0], bb[1] - 2), font, 0.7, (255, 0, 0), 2) cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]), (139, 139, 139), 1) else: cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]), (random.random() * 128 + 128, random.random() * 128 + 128, random.random() * 128 + 128), 1) if gt_boxes is not None: gt_boxes = np.array(gt_boxes) for gt in gt_boxes: bb = gt[:4].astype(int) if is_show_label: cls_id = int(gt[4]) cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]), (0, 0, 255), 3) if classes and len(classes) > cls_id: cls_name = classes[cls_id] else: cls_name = str(cls_id) cv2.putText(im, '{:s}'.format(cls_name), (bb[0], bb[1] - 2), \ font, 0.5, (0, 0, 255), 1) else: cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]), (0, 0, 255), 3) if keypoints is not None: keypoints = np.array(keypoints).astype(int) keypoints = keypoints.reshape(-1, 17, 3) if False: idx = np.where(det_boxes[:, -1] > thresh) keypoints = keypoints[idx] for i in range(len(keypoints)): draw_skeleton(im, keypoints[i], show_skeleton_labels) else: for i in range(len(keypoints)): draw_skeleton(im, keypoints[i], show_skeleton_labels) if return_img: return im.copy() import matplotlib.pyplot as plt im = cv2.cvtColor ( im, cv2.COLOR_BGR2RGB ) plt.imshow(im) plt.show() # cv2.imshow(name, im) # cv2.waitKey(0) # while True: # c = cv2.waitKey(0) # if c == ord('d'): # return # elif c == ord('n'): # break def draw_skeleton(aa, kp, show_skeleton_labels=False): skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] kp_names = ['nose', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'l_shoulder', 'r_shoulder', 'l_elbow', 'r_elbow', 'l_wrist', 'r_wrist', 'l_hip', 'r_hip', 'l_knee', 'r_knee', 'l_ankle', 'r_ankle'] for i, j in skeleton: if kp[i-1][0] >= 0 and kp[i-1][1] >= 0 and kp[j-1][0] >= 0 and kp[j-1][1] >= 0 and \ (len(kp[i-1]) <= 2 or (len(kp[i-1]) > 2 and kp[i-1][2] > 0.1 and kp[j-1][2] > 0.1)): cv2.line(aa, tuple(kp[i-1][:2]), tuple(kp[j-1][:2]), (0,255,255), 2) for j in range(len(kp)): if kp[j][0] >= 0 and kp[j][1] >= 0: if len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 1.1): cv2.circle(aa, tuple(kp[j][:2]), 2, tuple((0,0,255)), 2) elif len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 0.1): cv2.circle(aa, tuple(kp[j][:2]), 2, tuple((255,0,0)), 2) if show_skeleton_labels and (len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 0.1)): cv2.putText(aa, kp_names[j], tuple(kp[j][:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0))
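# A minimal usage sketch (hypothetical box values): drawing one detection on a blank
# canvas with the built-in VOC class list; class id 15 is 'person' in the list above.

def _visualize_demo():
    img = np.zeros((300, 300, 3), dtype=np.uint8)
    # one detection row: [x1, y1, x2, y2, cls_id, score]
    dets = np.array([[50, 50, 200, 250, 15, 0.92]])
    out = visualize(img, det_boxes=dets, classes='voc', thresh=0.5, return_img=True)
    return out  # annotated copy of the input image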
python
import webbrowser


class RomanNumeralCipher:
    def __init__(self):
        '''This is a python implementation of Roman Numeral Cipher'''
        self.val = [
            1000, 900, 500, 400,
            100, 90, 50, 40,
            10, 9, 5, 4, 1
        ]
        self.syb = [
            "M", "CM", "D", "CD",
            "C", "XC", "L", "XL",
            "X", "IX", "V", "IV", "I"
        ]

    url = 'https://www.britannica.com/topic/Roman-numeral'

    def about(self):
        '''Read about Roman Numeral Cipher online'''
        webbrowser.open(self.url)

    def encrypt(self, num: int) -> str:
        result = ''
        if not isinstance(num, int):
            return 'Cannot cast to Roman cipher'
        i = 0
        while num > 0:
            for _ in range(num // self.val[i]):
                result += self.syb[i]
                num -= self.val[i]
            i += 1
        return result

    def decrypt(self, msg: str) -> int:
        list_ = ['CM', 'CD', 'XC', 'XL', 'IX', 'IV']
        num = 0
        for ele in list_:
            if ele in msg:
                msg = msg.replace(ele, '')
                num += self.val[self.syb.index(ele)]
        for ele in msg:
            num += self.val[self.syb.index(ele)]
        return num


if __name__ == '__main__':
    cipher = RomanNumeralCipher()
    message = 3349

    encrypted = cipher.encrypt(message)
    decrypted = cipher.decrypt(encrypted)
    print(encrypted)
    print(decrypted)
python
import json import disnake as discord from disnake.ext import commands class Active_Check(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def activate(self, ctx, cog=None): with open('utils/json/active_check.json', 'r') as f: data = json.load(f) if not cog: if str(ctx.guild.id) not in data: new_checks = { "Administration": "true", "Automod": "true", "Channel": "true", "Fun": "true", "Help": "true", "Info": "true", "Math": "true", "Moderation": "true", "Music": "true", "Poll": "true", "Roles": "true", "Rules": "true", "Setup": "true", "Ticket": "true", "Timers": "true", "Translator": "true", "Utilities": "true", "Verify": "true" } data[str(ctx.guild.id)] = new_checks with open('utils/json/active_check.json', 'w') as f: json.dump(data, f, indent=4) embed = discord.Embed(description=f'Der Server `{ctx.guild.name}` wurde **erfolgreich registriert!**', color=discord.Color.green()) await ctx.send(embed=embed) else: embed = discord.Embed(description=f'Der Server `{ctx.guild.name}` ist **bereits registriert!**', color=discord.Color.green()) await ctx.send(embed=embed) elif data[str(ctx.guild.id)][f"{cog}"] == 'true': embed = discord.Embed(description=f'Das `Modul {cog}` ist **bereits aktiviert!**', color=discord.Color.green()) await ctx.send(embed=embed) elif data[str(ctx.guild.id)][f"{cog}"] == 'false': data[str(ctx.guild.id)][f"{cog}"] = 'true' with open('utils/json/active_check.json', 'w') as f: json.dump(data, f, indent=4) embed = discord.Embed(description=f'Das `Modul {cog}` **war deaktiviert** und wurde **nun aktiviert!**', color=discord.Color.green()) await ctx.send(embed=embed) else: embed = discord.Embed( description=f'Dein Server scheint **nicht registriert zu sein!** **Registriere dein Server** bitte erst einmal **mit dem Befehl** `?activate`', color=discord.Color.red()) await ctx.send(embed=embed) @commands.command() async def deactivate(self, ctx, cog): with open('utils/json/active_check.json', 'r') as f: data = json.load(f) if str(ctx.guild.id) not in data: embed = discord.Embed( description=f'Dein Server scheint **nicht registriert zu sein!** **Registriere dein Server** bitte erst einmal **mit dem Befehl** `?activate`', color=discord.Color.red()) await ctx.send(embed=embed) elif data[str(ctx.guild.id)][f"{cog}"] == 'false': embed = discord.Embed(description=f'Das `Modul {cog}` ist **bereits deaktiviert!**', color=discord.Color.green()) await ctx.send(embed=embed) elif data[str(ctx.guild.id)][f"{cog}"] == 'true': data[str(ctx.guild.id)][f"{cog}"] = 'false' with open('utils/json/active_check.json', 'w') as f: json.dump(data, f, indent=4) embed = discord.Embed(description=f'Das `Modul {cog}` **war aktiviert** und wurde **nun deaktiviert!**', color=discord.Color.green()) await ctx.send(embed=embed) else: embed = discord.Embed(description=f'**Unbekannter Fehler!** Versuche es in ein paar Sekunden erneut', color=discord.Color.red()) await ctx.send(embed=embed) @commands.command() async def check(self, ctx, cog): with open('utils/json/active_check.json', 'r') as f: data = json.load(f) if str(ctx.guild.id) not in data: embed = discord.Embed( description=f'Dein Server scheint **nicht registriert zu sein!** **Registriere dein Server** bitte erst einmal **mit dem Befehl** `?activate`', color=discord.Color.red()) await ctx.send(embed=embed) elif data[str(ctx.guild.id)][f"{cog}"] == 'false': embed = discord.Embed(description=f'Das Modul `{cog}` ist **momentan deaktiviert!**', color=discord.Color.green()) await ctx.send(embed=embed) elif 
data[str(ctx.guild.id)][f"{cog}"] == 'true': embed = discord.Embed(description=f'Das Modul `{cog}` ist **momentan aktiviert!**', color=discord.Color.green()) await ctx.send(embed=embed) else: embed = discord.Embed(description=f'**Unbekannter Fehler!** Versuche es in ein paar Sekunden erneut', color=discord.Color.red()) await ctx.send(embed=embed) @commands.command() async def check_all(self, ctx): with open('utils/json/active_check.json', 'r') as f: data = json.load(f) if str(ctx.guild.id) not in data: embed = discord.Embed( description=f'Dein Server scheint **nicht registriert zu sein!** **Registriere dein Server** bitte erst einmal **mit dem Befehl** `?activate`', color=discord.Color.red()) await ctx.send(embed=embed) elif str(ctx.guild.id) in data: embed = discord.Embed(description=f'{data[str(ctx.guild.id)]}', color=discord.Color.green()) await ctx.send(embed=embed) else: return def setup(bot): bot.add_cog(Active_Check(bot))
python
import sys sys.path.append("../common/tests") from test_utils import * import test_common sys.path.insert(0, '../../../../build/production/config/schema-transformer/') from vnc_api.vnc_api import * import uuid class STTestCase(test_common.TestCase): def setUp(self): super(STTestCase, self).setUp() self._svc_mon_greenlet = gevent.spawn(test_common.launch_svc_monitor, self._api_server_ip, self._api_server_port) self._st_greenlet = gevent.spawn(test_common.launch_schema_transformer, self._api_server_ip, self._api_server_port) def tearDown(self): self._svc_mon_greenlet.kill() self._st_greenlet.kill() super(STTestCase, self).tearDown() def create_virtual_machine(self, name, vn, ipaddress): vm_instance = VirtualMachine(name) self._vnc_lib.virtual_machine_create(vm_instance) fq_name = [name] fq_name.append('0') vmi = VirtualMachineInterface(parent_type = 'virtual-machine', fq_name = fq_name) vmi.set_virtual_network(vn) self._vnc_lib.virtual_machine_interface_create(vmi) ip = InstanceIp(vm_instance.name + '.0') ip.set_virtual_machine_interface(vmi) ip.set_virtual_network(vn) ip.set_instance_ip_address(ipaddress) uuid = self._vnc_lib.instance_ip_create(ip) return vm_instance def vmi_clean(self, vm_instance): fq_name = vm_instance.fq_name fq_name.append('0') try: vmi = self._vnc_lib.virtual_machine_interface_read(fq_name = fq_name) except NoIdError: return ips = vmi.get_instance_ip_back_refs() for ref in ips: self._vnc_lib.instance_ip_delete(id = ref['uuid']) self._vnc_lib.virtual_machine_interface_delete(id = vmi.uuid) def delete_virtual_machine(self, vm_instance): self.vmi_clean(vm_instance) self._vnc_lib.virtual_machine_delete(id = vm_instance.uuid) def create_network_policy_with_multiple_rules(self, rules): pentrys = [] for rule in rules: src_addr = rule["src"] if src_addr["type"] == "vn": vn = src_addr["value"] addr1 = AddressType(virtual_network=vn.get_fq_name_str()) else: cidr = src_addr["value"].split('/') pfx = cidr[0] pfx_len = int(cidr[1]) addr1 = AddressType(subnet=SubnetType(pfx, pfx_len)) dst_addr = rule["dst"] if dst_addr["type"] == "vn": vn = dst_addr["value"] addr2 = AddressType(virtual_network=vn.get_fq_name_str()) else: cidr = dst_addr["value"].split('/') pfx = cidr[0] pfx_len = int(cidr[1]) addr2 = AddressType(subnet=SubnetType(pfx, pfx_len)) #src_port = rule["src-port"] src_port = PortType(-1, 0) #dst_port = rule["dst-port"] dst_port = PortType(-1, 0) action = rule["action"] action_list = ActionListType(simple_action=action) prule = PolicyRuleType(direction=rule["direction"], protocol=rule["protocol"], src_addresses=[addr1], dst_addresses=[addr2], src_ports=[src_port], dst_ports=[dst_port], action_list=action_list) pentrys.append(prule) pentry = PolicyEntriesType(pentrys) np = NetworkPolicy(str(uuid.uuid4()), network_policy_entries=pentry) self._vnc_lib.network_policy_create(np) return np # end create_network_policy_with_multiple_rules def delete_network_policy(self, policy, auto_policy=False): action_list = policy.network_policy_entries.policy_rule[0].action_list if action_list: for service in action_list.apply_service or []: si = self._vnc_lib.service_instance_read(fq_name_str=service) st_ref = si.get_service_template_refs() st = self._vnc_lib.service_template_read(id=st_ref[0]['uuid']) self._vnc_lib.service_instance_delete(id=si.uuid) self._vnc_lib.service_template_delete(id=st.uuid) # end for service # if action_list if not auto_policy: self._vnc_lib.network_policy_delete(id=policy.uuid) # end delete_network_policy(policy)
python
from django.contrib import admin

from unecorn.models import *

admin.site.register(Discount)
admin.site.register(Category)
admin.site.register(Company)
python
from keras.models import Sequential, model_from_json from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda from keras.layers import Cropping2D from keras.layers.convolutional import Conv2D from keras.layers.pooling import MaxPooling2D from keras.layers.advanced_activations import ELU from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, Callback from sklearn.model_selection import train_test_split from sklearn.utils import shuffle from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer import math import numpy as np from PIL import Image import cv2 import matplotlib.pyplot as plt from os import getcwd import csv # Fix error with TF and Keras import tensorflow as tf # tf.python.control_flow_ops = tf import sklearn def displayCV2(img): # Displaying a CV2 Image cv2.imshow('image',img) cv2.waitKey(0) cv2.destroyAllWindows() samples = [] #simple array to append all the entries present in the .csv file with open('./data/driving_log.csv') as csvfile: #currently after extracting the file is present in this path reader = csv.reader(csvfile) next(reader, None) #this is necessary to skip the first record as it contains the headings for line in reader: samples.append(line) # Code for Data Augmentation (Image Generator) def generator(samples, batch_size=32): num_samples = len(samples) while 1: shuffle(samples) # Shuffling the total images for offset in range(0, num_samples, batch_size): batch_samples = samples[offset:offset+batch_size] images = [] angles = [] for batch_sample in batch_samples: for i in range(0,3): # Taking 3 images, first one is center, second is left, and third is right name = './data/data/IMG/'+batch_sample[i].split('/')[-1] center_image = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB) # Since CV2 reads an image in BGR we need to convert it to RGB since in drive.py it is RGB center_angle = float(batch_sample[3]) # Getting the steering angle measurement images.append(center_image) # Introducing correction for left and right images # if using the left image (i == 1), then increase the steering angle by 0.2 # if using the right image (i == 2), then decrease the steering angle by 0.2 if(i == 0): angles.append(center_angle) elif(i == 1): angles.append(center_angle + 0.2) elif(i == 2): angles.append(center_angle - 0.2) # Code for Augmentation of data (6 augmented images per 1 source image) # We flip the image and mirror the associated steering angle measurement images.append(cv2.flip(center_image,1)) if(i==0): angles.append(center_angle*-1) elif(i==1): angles.append((center_angle+0.2)*-1) elif(i==2): angles.append((center_angle-0.2)*-1) # Here we can get 6 images from one image X_train = np.array(images) y_train = np.array(angles) yield sklearn.utils.shuffle(X_train, y_train) # Here we do not hold the values of X_train and y_train instead we yield the values meaning we hold until generator() is running ### Main Program ### # Getting the data lines = [] with open('./data/driving_log.csv') as csvfile: reader = csv.reader(csvfile) next(reader) for line in reader: lines.append(line) images = [] measurements = [] for line in lines: source_path = line[0] filename = source_path.split('/')[-1] current_path = './data/IMG/' + filename image = cv2.imread(current_path) images.append(image) measurement = float(line[3]) measurements.append(measurement) X_train = np.array(images) y_train = np.array(measurements) # The Neural Network Architecture (NVIDIA Model) model = Sequential() model.add(Lambda(lambda x: x / 
255.0 - 0.5, input_shape=(160,320,3))) model.add(Cropping2D(cropping=((70,25),(0,0)))) model.add(Conv2D(24, activation='relu', padding='valid', strides=(2,2), kernel_size=(5, 5))) model.add(ELU()) model.add(Conv2D(36, activation='relu', padding='valid', strides=(2,2), kernel_size=(5, 5))) model.add(ELU()) model.add(Conv2D(48, activation='relu', padding='valid', strides=(2,2), kernel_size=(5, 5))) model.add(ELU()) model.add(Dropout(0.5)) model.add(Conv2D(64, activation='relu', padding='valid', kernel_size=(3, 3))) model.add(ELU()) model.add(Conv2D(64, activation='relu', padding='valid', kernel_size=(3, 3))) model.add(ELU()) model.add(Flatten()) model.add(Dense(100)) model.add(ELU()) model.add(Dense(50)) model.add(ELU()) model.add(Dense(10)) model.add(ELU()) model.add(Dense(1)) model.compile(loss='mse', optimizer='adam') train_samples, validation_samples = train_test_split(samples,test_size=0.15) #simply splitting the dataset to train and validation set usking sklearn. .15 indicates 15% of the dataset is validation set # Compile and train the model using the generator function train_generator = generator(train_samples, batch_size=32) validation_generator = generator(validation_samples, batch_size=32) # model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=5) model.fit_generator(train_generator, samples_per_epoch= len(train_samples), validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=5, verbose=1) print(model.summary()) model.save('model.h5')
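# A short sketch (assumes the standard Keras API): after training, the saved model would
# typically be reloaded for inference, e.g. by the simulator's drive.py.

def _load_for_inference(path='model.h5'):
    from keras.models import load_model
    model = load_model(path)
    # steering = float(model.predict(image[None, :, :, :], batch_size=1))
    return model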
python
#!/usr/bin/env python

import logging

from p4p.client.thread import Context

_log = logging.getLogger(__name__)


def getargs():
    from argparse import ArgumentParser
    P = ArgumentParser()
    P.add_argument('pvname', help='SIGS pvname (eg. RX:SIG)')
    P.add_argument('filename', help='list of BSA/signal PV names. text, one per line')
    P.add_argument('-v', '--verbose', action='store_const', const=logging.DEBUG, default=logging.INFO)
    return P.parse_args()


def main(args):
    sigs = []
    with open(args.filename, 'r') as F:
        for line in F:
            line = line.strip()
            if len(line) == 0 or line[:1] == '#':
                continue
            _log.debug("Read signal '%s'", line)
            sigs.append(line)

    with Context('pva') as ctxt:
        ctxt.put(args.pvname, sigs, wait=True)

        print("Success. Signal list now")
        for sig in ctxt.get(args.pvname):
            print(sig)


if __name__ == '__main__':
    args = getargs()
    logging.basicConfig(level=args.verbose)
    main(args)
python
#!/usr/bin/env python
# coding: utf-8

import rospy
import tf
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
import numpy as np


class Pose_pub:
    def __init__(self):
        self._sub_pos = rospy.Subscriber("/head", PoseStamped, self.pose_callback)
        self.pub = rospy.Publisher("master_joint_state", JointState, queue_size=10)
        # Get the controller's initial pose
        self.zero_pose = rospy.wait_for_message("/head", PoseStamped).pose
        quaternion = [self.zero_pose.orientation.x, self.zero_pose.orientation.y,
                      self.zero_pose.orientation.z, self.zero_pose.orientation.w]
        euler = tf.transformations.euler_from_quaternion(quaternion, axes='rzyx')
        self.zero_pan = euler[0]
        # Run at 10 Hz
        self.r = rospy.Rate(10)
        # Scale factor for the controller position
        self.scale_fac = 1.
        # Offset of the arm end-effector position
        self.r_offset = 0.8
        self.q_old = np.array([0., 0., 0., 0., 0., 0.])
        # Maximum joint angular velocity
        self.max_vel = 0.5

    def pose_callback(self, message):
        self.pose = message.pose

    # Inverse kinematics computation
    def ik(self):
        while not rospy.is_shutdown():
            # Target end-effector position
            r_ref = self.pose.position.z - self.zero_pose.position.z
            # Scale the position
            r_ref *= self.scale_fac
            # Apply the end-effector offset
            r_ref += self.r_offset
            # Check that the end-effector position is within the movable range
            r_ref = self.check_movable_range(r_ref)
            theta = np.arccos(r_ref)
            pan, tilt, _ = self.calc_pan_tilt_angle()
            rospy.loginfo(pan)
            q = np.array([-pan - self.zero_pan, theta, -2 * theta, -tilt + theta, 0, 0])
            q = self.angular_vel_limit(q)
            q_deg = np.rad2deg(q)
            js = JointState()
            js.name = ["joint{}".format(i) for i in range(1, 6)]
            js.position = q_deg
            self.pub.publish(js)
            self.r.sleep()

    # Limit the joint angular velocity
    def angular_vel_limit(self, q):
        q_diff = self.q_old - q
        q_diff_max = np.abs(q_diff).max()
        if(q_diff_max > self.max_vel):
            rospy.loginfo("Too fast")
            q_diff /= q_diff_max
            q_diff *= self.max_vel
            q = self.q_old - q_diff
        self.q_old = q
        return q

    # Compute the pan/tilt (pitch) angles
    def calc_pan_tilt_angle(self):
        quaternion = [self.pose.orientation.x, self.pose.orientation.y,
                      self.pose.orientation.z, self.pose.orientation.w]
        euler = tf.transformations.euler_from_quaternion(quaternion, axes='rzyx')
        return euler

    def check_movable_range(self, r_ref):
        if r_ref > 1:
            rospy.loginfo("Out of movable range")
            r_ref = 1
        return r_ref


if __name__ == '__main__':
    try:
        rospy.init_node('pan_tilt_controller')
        pose_pub = Pose_pub()
        pose_pub.ik()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
python
""" 语言概念与机制 http://coolpython.net/python_interview/basic/py_concept_mechanism.html """ # 01 谈下GIL 全局解释器锁 # 02 遍历文件夹,输出文件夹下所有文件的路径 import os def print_directory_contents(path): test02_dirList = os.listdir(path) for childfile in test02_dirList: childPath = os.path.join(path, childfile) # 判断为文件夹 if os.path.isdir(childPath): print_directory_contents(childPath) else: print(childPath) print_directory_contents('./') def get_english_score(): return 90 def get_history_score(): return 95 def get_score(course): golbal_dic = globals() print(golbal_dic) funname = f'get_{course}_score' # 如果找不到,直接返回lambda表达式,不会应为程序而报错 func = golbal_dic.get(funname, lambda: 0) return func() print(get_score('english')) print(get_score('abc')) for i, j in enumerate([3, 65, 2, 5, 6]): print(i, j) def abc(): print('aa') print(abc()) import enum
python
import gzip

import jsonpickle

from mdrsl.rule_models.eids.st_to_mt_model_merging import MergedSTMIDSClassifier


def store_merged_st_mids_model(merged_model_abs_file_name: str,
                               merged_st_mids_classifier: MergedSTMIDSClassifier) -> None:
    frozen = jsonpickle.encode(merged_st_mids_classifier)
    with gzip.open(merged_model_abs_file_name, 'wt') as ofile:
        ofile.write(frozen)


def load_merged_st_mids_model(merged_model_abs_file_name: str) -> MergedSTMIDSClassifier:
    mids_classifier: MergedSTMIDSClassifier
    with gzip.open(merged_model_abs_file_name, 'rt') as ifile:
        file_contents = ifile.read()
        mids_classifier = jsonpickle.decode(file_contents)
    return mids_classifier
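# A minimal usage sketch (the path is a placeholder): round-tripping a merged model
# through the two helpers above.

def _round_trip_demo(clf: MergedSTMIDSClassifier,
                     path: str = '/tmp/merged_st_mids_model.json.gz') -> MergedSTMIDSClassifier:
    store_merged_st_mids_model(path, clf)
    return load_merged_st_mids_model(path)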
python
# https://www.youtube.com/watch?v=2egPL5KFCC8&list=PLGKQkV4guDKEKZXAyeLQZjE6fulXHW11y&index=2
# JavaScript content cannot be pulled with BeautifulSoup; JavaScript-rendered pages need Selenium
# result 0 for an existing attribute; see the image example below for how to fetch it
import requests
from bs4 import BeautifulSoup

url = "https://www.marketwatch.com/"
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')

soup.find_all('div', class_="element element--latestNews")
a = len(soup.find_all('div', class_="element element--latestNews"))
# always use len to know how many elements/tags there are; here the tag is div. To count divs with the
# same name we look at the attribute, in this case a class; if it were an id attribute there would be no
# need to count, since ids are unique. With the class name "element element--latestNews" we get len = 1
b = soup.find_all('div', class_="element element--latestNews")
c = soup.find('a').get('href')
# HTML links are defined with the <a> tag. The link address is specified in the href attribute:
# the line above extracts only one link; to extract all of them use a loop
# linkes()
e = soup.find_all('ul')
f = len(soup.find_all('ul'))
g = soup.find_all('ul')[0]
print(5*'\n')
print(soup.find_all('ul', class_="list list--menu j-list"))
print(len(soup.find_all('ul', class_="list list--menu j-list")))
print(5*'\n')
# https://www.w3schools.com/html/html_lists.asp
# https://www.youtube.com/watch?v=5IxadAxTS04&list=PLGKQkV4guDKEKZXAyeLQZjE6fulXHW11y&index=3
# listas()

def imagem():
    # print(soup.find_all('img'))
    print(soup.find('img').get('src'))
    print(soup.find('img').get('data-src'))
    # get does not work for the 'data-src' attribute; use the code below
    print(soup.find('img', attrs={'data-src': True}))
    # print(soup.findAll('img', attrs={'data-src': True}))
    # https://www.w3schools.com/html/html_images.asp

imagem()
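# A small sketch continuing the soup object above: the comment notes that soup.find('a')
# returns only the first link and that a loop is needed for all of them; collecting every
# href on the page:

all_links = [a.get('href') for a in soup.find_all('a') if a.get('href')]
print(len(all_links))
print(all_links[:5])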
python
# coding: utf-8

"""
    jatdb

    JSON API to DB: Fetch JSON from APIs and send to a TinyDB database.  # noqa: E501

    OpenAPI spec version: 0.0.2
    Contact: [email protected]
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import unittest

import jatdb_client
from jatdb_client.api.content_api import ContentApi  # noqa: E501
from jatdb_client.rest import ApiException


class TestContentApi(unittest.TestCase):
    """ContentApi unit test stubs"""

    def setUp(self):
        self.api = jatdb_client.api.content_api.ContentApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_content_get(self):
        """Test case for content_get"""
        pass

    def test_content_post(self):
        """Test case for content_post"""
        pass

    def test_content_put(self):
        """Test case for content_put"""
        pass


if __name__ == '__main__':
    unittest.main()
python
import json import os import sys import re import pickle import logging import gzip import shutil import urllib.request from tqdm import tqdm from collections import defaultdict from utils.data_utils import load_jsonl_file, create_pkl_file, load_pkl_file module_path = os.path.dirname(os.path.abspath(__file__)) # --------------------------------------------- Pipelines ---------------------------------------------------- class DataPreprocessingRoutine: ''' Data Preparation Routine This class holds utilities that execute a data processing routine that: 1. Loads Natural Questions simplified training dataset from local directory 2. Filters the examples to only those relevant to retriever evaluation (has short_answer, resolves multiple answers) 3. Cleans, parses, and extracts relevant data fields 4. Saves the prepared data to a local directory Args: retriever_eval_only (bool) - indicates if the pipeline incluedes short answer AND no answer (False) or short answer only (True) raw_data_path (str) - path to unzipped simplified nq jsonl file ''' def __init__(self, raw_data_path, retriever_eval_only=True): self.mode = retriever_eval_only self.raw_data_path = raw_data_path def run(self): logging.info('Data Processing Routine Started') # check if file already exits ext = "" if self.mode else "_fullsys" outfile = module_path+f'/data/stage_data/extracted_clean_data{ext}.pkl' ## TO-DO: Make this implicit! if not os.path.exists(outfile): # run pipeline self.load_data() self.filter_nq_train_data() self.extract_data() self.drop_longer_answers() # save data os.makedirs(module_path+'/data/stage_data', exist_ok=True) self.save_data(outfile) logging.info('Data Processing Routine Finished') else: logging.info('This file has already been created. Skipping DataPreprocessing and using existing file.') return def load_data(self): ''' Loads raw, zipped jsonl data from disk ''' self.data = load_jsonl_file(filepath=self.raw_data_path) return def filter_nq_train_data(self): ''' This method takes the full corpus of NQ training data and filters examples that are not relevant for proper retriever evaluation, including: a.) records that do not have at least one short answer are discarded and b.) records that have more than one short answer are truncated to only use the first short answer. 
These filters are in line with standard retriever evaluation techniques as well as Google's suggested reference implementation: https://github.com/google-research/language/blob/master/language/question_answering/ decatt_docreader/preprocessing/create_nq_short_pipeline_examples.py Args: raw_data (list) - python object representation of the raw jsonl file retriever_eval_only (bool) - if False, include short answer AND no answer Returns: filtered_data (list) - a refined version of the raw jsonl file ''' logging.info('Filtering Data') multi_count = 0 filtered_data = [] for i, rec in enumerate(tqdm(self.data)): # ignore questions that dont have at least one short answer if len(rec['annotations'][0]['short_answers']) == 0 and self.mode==True: continue # if an annotation contains multiple short answers, keep only the first if len(rec['annotations'][0]['short_answers']) > 1: multi_count += 1 # extract first dict and keep as one-element list temp = [] short_ans = rec['annotations'][0]['short_answers'][0] temp.append(short_ans) # overwrite new_rec = rec.copy() new_rec['annotations'][0]['short_answers'] = temp filtered_data.append(new_rec) else: filtered_data.append(rec) logging.info(f'{len(self.data)-len(filtered_data)} records (out of {len(self.data)}) did not have at least one short answer and were dropped.') logging.info(f'{multi_count} questions had multiple short answers that were effected by truncation.') # overwrite data attribute self.data = filtered_data return def extract_data(self): ''' This method loops through a list of NQ simplified records and extracts only the data items needed for retriever evaluation including: - example_id - document_title (extracted from document_url using extract_wiki_title()) - document_url - question_text - short_answer (converted to text using get_short_answer_from_span()) - document_text_clean (stripped of remaining HTML tags using clean_document_text()) Args: data (list) - a list of filtered jsonl records from NQ simplified dataset Returns: extracted_data (list) - a list of cleaned jsonl records ''' logging.info('Extracting Data') extracted_data = [] for i, rec in enumerate(tqdm(self.data)): try: example_id = rec['example_id'] document_url = rec['document_url'] question_text = rec['question_text'] short_answer = self.get_short_answer_from_span(rec) document_text_clean = self.clean_document_text(rec['document_text']) document_title = self.extract_wiki_title(rec['document_url']) # to ensure our dataset is completely solveable this logic weeds out erroneous labels # ex. 'Mickey Hart </Li> <Li> Bill Kreutzmann </Li> <Li> John Mayer </Li> was selected as long AND short answer # when really each of these should have been their own short answers if short_answer not in document_text_clean: continue new_rec = {'example_id': example_id, 'document_title': document_title, 'document_url': document_url, 'question_text': question_text, 'short_answer': short_answer, 'document_text_clean': document_text_clean} extracted_data.append(new_rec) except Exception as e: logging.info(str(e)) continue logging.info(f'{len(extracted_data)} of the {len(self.data)} records are complete and solvable.') # overwrite data attribute self.data = extracted_data return def drop_longer_answers(self): ''' This method loops through a list of NQ simplified records and drops any records where the short answer contains more than 5 tokens. 
Answers with many tokens often resemble extractive snippets rather than canonical answers, so we discard answers with more than 5 tokens: https://arxiv.org/pdf/1906.00300.pdf Args: data (list) - a list of cleaned jsonl records from NQ simplified dataset Returns: extracted_data (list) - a list of cleaned jsonl records ''' logging.info('Dropping Long Answers') slim_data = [] for i, rec in enumerate(tqdm(self.data)): if len(rec['short_answer'].split(' ')) <= 5: slim_data.append(rec) logging.info(f'{len(self.data) - len(slim_data)} records were "long" short-answers and were dropped.') logging.info(f'{len(slim_data)} records remain.') # overwrite data attribute self.data = slim_data return def save_data(self, outfile): ''' Saves the data attribute to a pickle local file ''' create_pkl_file(self.data, outfile) return @staticmethod def get_short_answer_from_span(example): ''' Use the short answer span from a NQ json record to retreive and return the corresponding short answer text. Args: example - a jsonl record from NQ simplified dataset Returns: short_answer (string) - the string representation of text in the short answer span ''' sa_field = example['annotations'][0]['short_answers'] if len(sa_field) >= 1: short_answer_span = sa_field[0] short_answer = " ".join(example['document_text'].split(" ")\ [short_answer_span['start_token']:short_answer_span['end_token']]) else: short_answer = '' return short_answer @staticmethod def clean_document_text(text): ''' This function applies a regular expression to an input text string to remove any characters wrapped in <> with the goal of stripping HTML tags from a string. Args: text (string) Returns: text (string) - cleaned text ''' cleaner = re.compile('<.*?>') return re.sub(cleaner, '', text) @staticmethod def extract_wiki_title(document_url): ''' This function applies a regular expression to an input wikipedia article URL to extract and return the article title. Args: document_url (string) Returns: title (string) - article title ''' pattern = 'title=(.*?)&amp' try: title = re.search(pattern, document_url).group(1) except AttributeError: title = 'No Title Found' return title class DataCompilationRoutine: ''' Data Compilation Utility Pipeline This class holds utilties to execute a data routine that: 1. Loads pre-cleaned data from staging 2. Deduplicates Wikipedia artilces and finalizes them for loading into ElasticSearch 3. Creates q/a records to be used for evaluation 4. 
Saves those data artifacts to eval_data directory Args: retriever_eval_only (bool) - indicates if the pipeline incluedes short answer AND no answer (False) or short answer only (True) ''' def __init__(self, clean_data_path=None, retriever_eval_only=True): self.mode = retriever_eval_only # set clean data path ext = "" if self.mode else "_fullsys" self.clean_data_path = clean_data_path if clean_data_path else module_path+f'/data/stage_data/extracted_clean_data{ext}.pkl' def run(self): logging.info('Data Compilation Routine Started') # check if exists ext = "" if self.mode else "_fullsys" outfile_ec = module_path+f'/data/eval_data/evidence_corpus{ext}.pkl' outfile_rec = module_path+f'/data/eval_data/qa_records{ext}.pkl' if not os.path.exists(outfile_ec) or not os.path.exists(outfile_ec): self.load_data() self.compile_evidence_corpus() self.compile_qa_records() # save data os.makedirs(module_path+'/data/eval_data', exist_ok=True) self.save_data(self.evidence_corpus, outfile_ec) self.save_data(self.qa_records, outfile_rec) logging.info('Data Compilation Routine Finished') else: logging.info('Stage data files have already been created, skipping compilation.') def load_data(self): ''' Loads clean, extracted pickle file from disk ''' self.data = load_pkl_file(filepath=self.clean_data_path) return def compile_evidence_corpus(self): ''' This method compiles all unique wikipedia documents into a dictionary Args: extracted_data (list) Returns: evidence_docs (dict) ''' logging.info('Compiling Evidence Docs') unique_titles = [] evidence_docs = [] for i, rec in enumerate(tqdm(self.data)): if rec['document_title'] not in unique_titles: unique_titles.append(rec['document_title']) fields = {'document_title': rec['document_title'], 'document_url': rec['document_url'], 'document_text_clean': rec['document_text_clean']} evidence_docs.append(fields) logging.info(f'Of the {len(self.data)} records, there are {len(evidence_docs)} unique Wikipedia articles.') self.evidence_corpus = evidence_docs return def compile_qa_records(self): ''' This method loops through the extracted_clean_data list and removes the document_text_clean field from each record Args: extracted_data (list) Returns: slim_data (list) ''' logging.info('Compiling QA Records') qa_records = [] for i, rec in enumerate(tqdm(self.data)): new_rec = {k:v for k,v in rec.items() if k != 'document_text_clean'} qa_records.append(new_rec) self.qa_records = qa_records return @staticmethod def save_data(obj, outfile): ''' Saves the obj to a pickle local file ''' create_pkl_file(obj, outfile) return
python
KEYWORDS = ["dev", "backup", "develop", "int", "internal", "staging", "test"] with open("../../roots.txt") as roots: with open("targets.txt", "w+") as targets: for domain in roots: for keyword in KEYWORDS: target = domain.strip("\n") + "-" + keyword.strip("\n") + ".oss.eu-west-1.aliyuncs.com" + "\n" targets.write(target)
python
# -*- coding: utf-8 -*-
"""
Module docstring

TODO:
    * Write module docstring
"""

from .player.dealer import Dealer
from .player.player import Player
from .carddeck.deck import Deck


class Game():
    """Class to represent the blackjack Game"""

    def __init__(self):
        self.dealer = Dealer()
        self.player = Player()
        self.deck = Deck()

    def __str__(self):
        result = ''
        result += f'Dealer:\n{str(self.dealer)}\n\n'
        result += f'Player:\n{str(self.player)}\n\n'
        result += f'Deck:\n{str(self.deck)}'
        return result

    def deal(self):
        # Clear both the dealer's and the player's hand
        self.dealer.hand.clear()
        self.player.hand.clear()

        # Populate and shuffle deck
        self.deck.populate()

        # Deal 2 cards to the dealer.
        self.dealer.hand.add_card(self.deck.deal_card())
        self.dealer.hand.add_card(self.deck.deal_card())

        # Deal 2 cards to the player.
        self.player.hand.add_card(self.deck.deal_card())
        self.player.hand.add_card(self.deck.deal_card())

    def hit(self):
        card = self.deck.deal_card()
        card.flip()
        self.player.hand.add_card(card)

    def stand(self):
        # Return value if <= 21 else return None
        return self.player.hand.value if self.player.hand.value <= 21 else None

    def play_dealer(self):
        # Flip dealer's cards over
        # self.dealer.hand.cards[1].flip()

        # Dealer will always hit until value meets or exceeds 17
        while self.dealer.hand.value < 17:
            self.dealer.hand.add_card(self.deck.deal_card())

    def end_round(self):
        '''Returns True player won, return False dealer wins, None TIE'''
        if not self.player.hand.bust:
            # Player is not bust
            if self.dealer.hand.bust or \
                    self.player.hand.value > self.dealer.hand.value:
                # Dealer is bust or player's hand is greater
                self.player.balance += self.player.bet * 2
                return True
            elif self.player.hand.value == self.dealer.hand.value:
                # Tie
                self.player.balance += self.player.bet
                return None
        return False
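A hypothetical driver for the Game API above, showing how one round flows through deal/hit/stand/play_dealer/end_round; the hit-until-17 player policy is an assumption made only for illustration.

def play_one_round(game):
    """Play a single round with a naive player policy (illustrative helper only)."""
    game.deal()
    # Player keeps hitting while the hand value is below 17 and not bust.
    while not game.player.hand.bust and game.player.hand.value < 17:
        game.hit()
    game.stand()
    game.play_dealer()
    # True = player won, False = dealer won, None = tie
    return game.end_round()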
python
from flask_pymongo import PyMongo
from flask_compress import Compress
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from itsdangerous import URLSafeTimedSerializer

mongo = PyMongo()
flask_bcrypt = Bcrypt()
flask_compress = Compress()
flask_cors = CORS(resources={"/api/*": {"origins": "*"}})

RECAPTCHA_SITEKEY = None
ImgurObject = None

Serialize_Secret_Keys = [""]
serializer = URLSafeTimedSerializer(Serialize_Secret_Keys)
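A minimal app-factory sketch showing how the module-level extension objects above are usually bound to a Flask application; the factory name, config key, and Mongo URI are placeholder assumptions, not part of the original module.

from flask import Flask


def create_app():
    app = Flask(__name__)
    app.config["MONGO_URI"] = "mongodb://localhost:27017/app_db"  # placeholder value
    # Bind each lazily created extension to the application instance.
    mongo.init_app(app)
    flask_bcrypt.init_app(app)
    flask_compress.init_app(app)
    flask_cors.init_app(app)
    return app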
python
# # Jasy - Web Tooling Framework # Copyright 2013-2014 Sebastian Werner # import json import copy class AbstractNode(list): __slots__ = [ # core data "line", "type", "tokenizer", "start", "end", "rel", "parent", # dynamic added data by other modules "comments", "scope", "values", # node type specific "value", "parenthesized", "fileId", "params", "name", "initializer", "condition", "assignOp", "thenPart", "elsePart", "statements", "statement", "variables", "names", "postfix" ] def __init__(self, tokenizer=None, type=None, args=[]): list.__init__(self) self.start = 0 self.end = 0 self.line = None if tokenizer: token = getattr(tokenizer, "token", None) if token: # We may define a custom type but use the same positioning as another token # e.g. transform curlys in block nodes, etc. self.type = type if type else getattr(token, "type", None) self.line = token.line # Start & end are file positions for error handling. self.start = token.start self.end = token.end else: self.type = type self.line = tokenizer.line self.start = None self.end = None self.tokenizer = tokenizer elif type: self.type = type for arg in args: self.append(arg) def getFileName(self): """Traverses up the tree to find a node with a fileId and returns it.""" node = self while node: fileId = getattr(node, "fileId", None) if fileId is not None: return fileId node = getattr(node, "parent", None) def getUnrelatedChildren(self): """Collects all unrelated children.""" collection = [] for child in self: if not hasattr(child, "rel"): collection.append(child) return collection def getChildrenLength(self, filter=True): """Number of (per default unrelated) children.""" count = 0 for child in self: if not filter or not hasattr(child, "rel"): count += 1 return count def remove(self, kid): """Removes the given kid.""" if kid not in self: raise Exception("Given node is no child!") if hasattr(kid, "rel"): delattr(self, kid.rel) del kid.rel del kid.parent list.remove(self, kid) def insert(self, index, kid): """Inserts the given kid at the given index.""" if index is None: return self.append(kid) if hasattr(kid, "parent"): kid.parent.remove(kid) kid.parent = self return list.insert(self, index, kid) def insertAll(self, index, kids): """Inserts all kids starting with the given index.""" if index is None: for kid in list(kids): self.append(kid) else: for pos, kid in enumerate(list(kids)): self.insert(index + pos, kid) def insertAllReplace(self, orig, kids): """Inserts all kids at the same position as the original node (which is removed afterwards)""" index = self.index(orig) for pos, kid in enumerate(list(kids)): self.insert(index + pos, kid) self.remove(orig) def append(self, kid, rel=None): """Appends the given kid with an optional relation hint.""" # kid can be null e.g. [1, , 2]. 
if kid: if hasattr(kid, "parent"): kid.parent.remove(kid) # Debug if not isinstance(kid, AbstractNode): raise Exception("Invalid kid: %s" % kid) if hasattr(kid, "tokenizer"): if hasattr(kid, "start"): if not hasattr(self, "start") or self.start is None or kid.start < self.start: self.start = kid.start if hasattr(kid, "end"): if not hasattr(self, "end") or self.end is None or self.end < kid.end: self.end = kid.end kid.parent = self # alias for function if rel is not None: setattr(self, rel, kid) setattr(kid, "rel", rel) # Block None kids when they should be related if not kid and rel: return return list.append(self, kid) def replace(self, kid, repl): """Replaces the given kid with a replacement kid.""" if repl in self: self.remove(repl) self[self.index(kid)] = repl if hasattr(kid, "rel"): repl.rel = kid.rel setattr(self, kid.rel, repl) # cleanup old kid delattr(kid, "rel") elif hasattr(repl, "rel"): # delete old relation on new child delattr(repl, "rel") delattr(kid, "parent") repl.parent = self return kid def toXml(self, format=True, indent=0, tab=" "): """Converts the node to XML.""" lead = tab * indent if format else "" innerLead = tab * (indent + 1) if format else "" lineBreak = "\n" if format else "" relatedChildren = [] attrsCollection = [] for name in self.__slots__: # "type" is used as node name - no need to repeat it as an attribute # "parent" is a relation to the parent node - for serialization we ignore these at the moment # "rel" is used internally to keep the relation to the parent - used by nodes which need to keep track of specific children # "start" and "end" are for debugging only if hasattr(self, name) and name not in ("type", "parent", "comments", "selector", "rel", "start", "end") and name[0] != "_": value = getattr(self, name) if isinstance(value, AbstractNode): if hasattr(value, "rel"): relatedChildren.append(value) elif type(value) in (bool, int, float, str, list, set, dict): if isinstance(value, bool): value = "true" if value else "false" elif type(value) in (int, float): value = str(value) elif type(value) in (list, set, dict): if isinstance(value, dict): value = value.keys() if len(value) == 0: continue try: value = ",".join(value) except TypeError as ex: raise Exception("Invalid attribute list child at: %s: %s" % (name, ex)) attrsCollection.append('%s=%s' % (name, json.dumps(value))) attrs = (" " + " ".join(attrsCollection)) if len(attrsCollection) > 0 else "" comments = getattr(self, "comments", None) scope = getattr(self, "scope", None) selector = getattr(self, "selector", None) if len(self) == 0 and len(relatedChildren) == 0 and (not comments or len(comments) == 0) and not scope and not selector: result = "%s<%s%s/>%s" % (lead, self.type, attrs, lineBreak) else: result = "%s<%s%s>%s" % (lead, self.type, attrs, lineBreak) if comments: for comment in comments: result += '%s<comment context="%s" variant="%s">%s</comment>%s' % (innerLead, comment.context, comment.variant, comment.text, lineBreak) if scope: for statKey in scope: statValue = scope[statKey] if statValue is not None and len(statValue) > 0: if isinstance(statValue, set): statValue = ",".join(statValue) elif isinstance(statValue, dict): statValue = ",".join(statValue.keys()) result += '%s<stat name="%s">%s</stat>%s' % (innerLead, statKey, statValue, lineBreak) if selector: for entry in selector: result += '%s<selector>%s</selector>%s' % (innerLead, entry, lineBreak) for child in self: if not child: result += "%s<none/>%s" % (innerLead, lineBreak) elif not hasattr(child, "rel"): result += 
child.toXml(format, indent + 1) elif not child in relatedChildren: raise Exception("Oops, irritated by non related: %s in %s - child says it is related as %s" % (child.type, self.type, child.rel)) for child in relatedChildren: result += "%s<%s>%s" % (innerLead, child.rel, lineBreak) result += child.toXml(format, indent + 2) result += "%s</%s>%s" % (innerLead, child.rel, lineBreak) result += "%s</%s>%s" % (lead, self.type, lineBreak) return result def __deepcopy__(self, memo): """Used by deepcopy function to clone AbstractNode instances.""" CurrentClass = self.__class__ # Create copy if hasattr(self, "tokenizer"): result = CurrentClass(tokenizer=self.tokenizer) else: result = CurrentClass(type=self.type) # Copy children for child in self: if child is None: list.append(result, None) else: # Using simple list appends for better performance childCopy = copy.deepcopy(child, memo) childCopy.parent = result list.append(result, childCopy) # Sync attributes # Note: "parent" attribute is handled by append() already for name in self.__slots__: if hasattr(self, name) and not name in ("parent", "tokenizer"): value = getattr(self, name) if value is None: pass elif type(value) in (bool, int, float, str): setattr(result, name, value) elif type(value) in (list, set, dict, CurrentClass): setattr(result, name, copy.deepcopy(value, memo)) # Scope can be assigned (will be re-created when needed for the copied node) elif name == "scope": result.scope = self.scope return result def getSource(self): """Returns the source code of the node.""" if not self.tokenizer: raise Exception("Could not find source for node '%s'" % node.type) if getattr(self, "start", None) is not None: if getattr(self, "end", None) is not None: return self.tokenizer.source[self.start:self.end] return self.tokenizer.source[self.start:] if getattr(self, "end", None) is not None: return self.tokenizer.source[:self.end] return self.tokenizer.source[:] # Map Python built-ins __repr__ = toXml __str__ = toXml def __eq__(self, other): return self is other def __bool__(self): return True
python
from useless import base
from useless.base import *


class Resolver(CMakePackage):
    def __init__(self):
        super().__init__()
        self.name = 'openvdb'
        self.depends(require('openexr'))
        self.depends(require('tbb'))
        self.depends(require('boost'))
        self.set('USE_BLOSC', 'OFF')
        self.set('USE_EXR', 'ON')

    def setup(self, src_dir, build_dir, install_dir):
        super().setup(src_dir, build_dir, install_dir)
        self.set('Boost_ROOT', src_dir + '/boost/')
        # self.set('CMAKE_POSITION_INDEPENDENT_CODE', 'TRUE')

    def download(self):
        self.checkpoint('download', lambda: download_git(
            'https://github.com/AcademySoftwareFoundation/openvdb',
            self.src_dir, tag='v8.0.0'))
python
# -*- coding: utf-8 -*-
"""
Diffraction image analysis
"""

from .alignment import (
    align,
    ialign,
    shift_image,
    itrack_peak,
    masked_register_translation,
)
from .calibration import powder_calq
from .correlation import mnxc, xcorr
from .metrics import (
    snr_from_collection,
    isnr,
    mask_from_collection,
    combine_masks,
    mask_image,
    trimr,
    triml,
)
from .powder import azimuthal_average
from .symmetry import nfold, reflection
python
class AbstractObject(object):
    def __init__(self):
        pass

    def get_class(self, universe):
        raise NotImplementedError("Subclasses need to implement get_class(universe).")

    def get_object_layout(self, universe):
        raise NotImplementedError(
            "Subclasses need to implement get_object_layout(universe)."
        )

    @staticmethod
    def is_invokable():
        return False

    def __str__(self):
        from som.vm.current import current_universe

        return "a " + self.get_class(current_universe).get_name().get_embedded_string()
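A sketch of the subclass contract implied above: a concrete object only has to supply get_class (and get_object_layout where it is used). The class name and the attribute names looked up on `universe` are assumptions for illustration, not the real SOM API.

class ExampleInteger(AbstractObject):
    def __init__(self, value):
        AbstractObject.__init__(self)
        self._value = value

    def get_class(self, universe):
        # Assumed attribute name, purely illustrative
        return universe.integer_class

    def get_object_layout(self, universe):
        # Assumed attribute name, purely illustrative
        return universe.integer_layout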
python
""" Tests for Galaxy Queue Worker """
python
from io import BytesIO import json import cgi from pathlib import Path from abeja.common.docker_image_name import DockerImageName, ALL_GPU_19_04, ALL_CPU_19_10 from abeja.training import JobDefinition, JobDefinitionVersion # noqa: F401 def test_job_definition_version( requests_mock, api_base_url, job_definition_version_factory, job_definition_response) -> None: version = job_definition_version_factory() # type: JobDefinitionVersion res = job_definition_response( version.organization_id, version.job_definition_id) requests_mock.get( '{}/organizations/{}/training/definitions/{}?include_jobs=false'.format( api_base_url, version.organization_id, version.job_definition_id), json=res) definition = version.job_definition assert definition assert definition.organization_id == version.organization_id assert definition.job_definition_id == version.job_definition_id def test_job_definition_versions(job_definition_factory) -> None: definition = job_definition_factory() # type: JobDefinition adapter = definition.job_definition_versions() assert adapter.organization_id == definition.organization_id assert adapter.job_definition_id == definition.job_definition_id def test_get_job_definition_version( requests_mock, api_base_url, job_definition_factory, job_definition_version_response) -> None: definition = job_definition_factory() # type: JobDefinition adapter = definition.job_definition_versions() res = job_definition_version_response( adapter.organization_id, adapter.job_definition_id, environment=None ) version_id = res['job_definition_version'] requests_mock.get( '{}/organizations/{}/training/definitions/{}/versions/{}'.format( api_base_url, adapter.organization_id, adapter.job_definition_name, version_id), json=res) version = adapter.get(job_definition_version_id=version_id) assert version assert version.organization_id == adapter.organization_id assert version.job_definition_id == adapter.job_definition_id assert version.job_definition_version_id == version_id assert version.handler == res['handler'] assert version.image == DockerImageName.parse(res['image']) assert version.environment == {} assert version.created_at == res['created_at'] assert version.modified_at == res['modified_at'] assert version.job_definition assert version.job_definition_id == adapter.job_definition_id def test_get_job_definition_versions( requests_mock, api_base_url, job_definition_factory, job_definition_version_response) -> None: definition = job_definition_factory() # type: JobDefinition adapter = definition.job_definition_versions() res1 = job_definition_version_response( adapter.organization_id, adapter.job_definition_id, environment=None ) res2 = job_definition_version_response( adapter.organization_id, adapter.job_definition_id, environment={'foo': '1'} ) requests_mock.get( '{}/organizations/{}/training/definitions/{}/versions'.format( api_base_url, adapter.organization_id, adapter.job_definition_name), json={ 'entries': [ res1, res2]}) it = adapter.list() assert len(it) == 2 versions = list(it) assert len(versions) == 2 for version, res in zip(versions, [res1, res2]): assert version.organization_id == adapter.organization_id assert version.job_definition_id == adapter.job_definition_id assert version.job_definition_version_id == res['job_definition_version'] assert version.handler == res['handler'] assert version.image == DockerImageName.parse(res['image']) assert version.environment == { } if res['environment'] is None else res['environment'] assert version.created_at == res['created_at'] assert version.modified_at 
== res['modified_at'] assert version.job_definition assert version.job_definition_id == adapter.job_definition_id def test_get_job_definition_versions_filter_archived( requests_mock, api_base_url, job_definition_factory, job_definition_version_response) -> None: definition = job_definition_factory() # type: JobDefinition adapter = definition.job_definition_versions() res1 = job_definition_version_response( adapter.organization_id, adapter.job_definition_id, environment=None ) requests_mock.get( '{}/organizations/{}/training/definitions/{}/versions?filter_archived=exclude_archived'.format( api_base_url, adapter.organization_id, adapter.job_definition_name), json={ 'entries': [res1]}) versions = list(adapter.list(filter_archived=True)) assert len(versions) == 1 def test_create_job_definition_version_zip( requests_mock, api_base_url, make_zip_content, job_definition_factory, job_definition_version_response) -> None: definition = job_definition_factory() # type: JobDefinition adapter = definition.job_definition_versions() res = job_definition_version_response( adapter.organization_id, adapter.job_definition_id) requests_mock.post( '{}/organizations/{}/training/definitions/{}/versions'.format( api_base_url, adapter.organization_id, adapter.job_definition_name), json=res) zip_content = make_zip_content({'train.py': b'print(1)'}) version = adapter.create( BytesIO(zip_content), 'train:main', ALL_GPU_19_04, { 'key': 'value'}, description='new version') assert version assert version.job_definition_version_id == res['job_definition_version'] assert version.job_definition assert version.job_definition_id == adapter.job_definition_id history = requests_mock.request_history assert len(history) == 1 fs = cgi.FieldStorage( fp=BytesIO( history[0].body), headers=history[0].headers, environ={ 'REQUEST_METHOD': 'POST'}) item = fs['parameters'] parameters = json.loads(item.value.decode('utf-8')) assert item.headers['Content-Type'] == 'application/json' assert parameters['handler'] == 'train:main' assert parameters['image'] == 'abeja-inc/all-gpu:19.04' assert parameters['environment'] == {'key': 'value'} item = fs['source_code'] assert item.headers['Content-Type'] == 'application/zip' assert item.value == zip_content def test_create_job_definition_version_files( requests_mock, api_base_url, tmpdir, make_zip_content, job_definition_factory, job_definition_version_response) -> None: definition = job_definition_factory() # type: JobDefinition adapter = definition.job_definition_versions() # Make some files files = [] with tmpdir.as_cwd(): d = Path('work') d.mkdir(parents=True, exist_ok=True) path = d / 'test.txt' path.write_bytes(b'test') files.append(str(path)) path = d / 'train.py' path.write_bytes(b'def handler(): pass') files.append(str(path)) res = job_definition_version_response( adapter.organization_id, adapter.job_definition_id) requests_mock.post( '{}/organizations/{}/training/definitions/{}/versions'.format( api_base_url, adapter.organization_id, adapter.job_definition_name), json=res) version = adapter.create( files, 'train:handler', ALL_CPU_19_10, { 'KEY': 'VALUE'}, description='new version') assert version assert version.job_definition_version_id == res['job_definition_version'] assert version.job_definition assert version.job_definition_id == adapter.job_definition_id history = requests_mock.request_history assert len(history) == 1 fs = cgi.FieldStorage( fp=BytesIO( history[0].body), headers=history[0].headers, environ={ 'REQUEST_METHOD': 'POST'}) item = fs['parameters'] parameters = 
json.loads(item.value.decode('utf-8')) assert item.headers['Content-Type'] == 'application/json' assert parameters['handler'] == 'train:handler' assert parameters['image'] == 'abeja-inc/all-cpu:19.10' assert parameters['environment'] == {'KEY': 'VALUE'} item = fs['source_code'] assert item.headers['Content-Type'] == 'application/zip' assert item.value def test_update_job_definition_version( requests_mock, api_base_url, job_definition_factory, job_definition_version_response) -> None: definition = job_definition_factory() # type: JobDefinition adapter = definition.job_definition_versions() res = job_definition_version_response( adapter.organization_id, adapter.job_definition_id) version_id = res['job_definition_version'] requests_mock.patch( '{}/organizations/{}/training/definitions/{}/versions/{}'.format( api_base_url, adapter.organization_id, adapter.job_definition_name, version_id), json=res) description = 'new version' version = adapter.update(version_id, description) assert version assert version.job_definition_version_id == version_id assert version.job_definition assert version.job_definition_id == adapter.job_definition_id history = requests_mock.request_history assert len(history) == 1 assert history[0].json() == {'description': description} def test_archive_job_definition_version( requests_mock, api_base_url, training_api_client, job_definition_factory) -> None: definition = job_definition_factory() # type: JobDefinition adapter = definition.job_definition_versions() requests_mock.post( '{}/organizations/{}/training/definitions/{}/versions/1/archive'.format( api_base_url, adapter.organization_id, adapter.job_definition_name), json={ 'message': "test-1 archived"}) adapter.archive(job_definition_version_id=1) assert requests_mock.called def test_unarchive_job_definition_version( requests_mock, api_base_url, training_api_client, job_definition_factory) -> None: definition = job_definition_factory() # type: JobDefinition adapter = definition.job_definition_versions() requests_mock.post( '{}/organizations/{}/training/definitions/{}/versions/1/unarchive'.format( api_base_url, adapter.organization_id, adapter.job_definition_name), json={ 'message': "test-1 unarchived"}) adapter.unarchive(job_definition_version_id=1) assert requests_mock.called def test_delete_job_definition_version( requests_mock, api_base_url, training_api_client, job_definition_factory) -> None: definition = job_definition_factory() # type: JobDefinition adapter = definition.job_definition_versions() requests_mock.delete( '{}/organizations/{}/training/definitions/{}/versions/1'.format( api_base_url, adapter.organization_id, adapter.job_definition_name), json={ 'message': "test-1 deleted"}) adapter.delete(job_definition_version_id=1) assert requests_mock.called
python
""" Import as: import dataflow.core.dag_adapter as dtfcodaada """ import logging from typing import Any, Dict, List import core.config as cconfig import dataflow.core.builders as dtfcorbuil import dataflow.core.dag as dtfcordag import dataflow.core.node as dtfcornode import helpers.hdbg as hdbg import helpers.hprint as hprint _LOG = logging.getLogger(__name__) class DagAdapter(dtfcorbuil.DagBuilder): """ Adapt a DAG builder by overriding part of the config and appending nodes. """ def __init__( self, dag_builder: dtfcorbuil.DagBuilder, overriding_config: Dict[str, Any], nodes_to_insert: List[dtfcornode.Node], nodes_to_append: List[dtfcornode.Node], **kwargs, ): """ Constructor. :param dag_builder: a `DagBuilder` containing a single sink :param overriding_config: a template `Config` containing the fields to override. Note that this `Config` can still be a template, i.e., containing dummies that are finally overwritten by callers. :param nodes_to_append: list of tuples `(node name, constructor)` storing the nodes to append to the DAG created from `dag_builder`. The node constructor function should accept only the `nid` and the configuration dict, while all the other inputs need to be already specified. """ super().__init__() hdbg.dassert_isinstance(dag_builder, dtfcorbuil.DagBuilder) self._dag_builder = dag_builder hdbg.dassert_isinstance(overriding_config, cconfig.Config) self._overriding_config = overriding_config hdbg.dassert_container_type(nodes_to_insert, list, tuple) self._nodes_to_insert = nodes_to_insert hdbg.dassert_container_type(nodes_to_append, list, tuple) self._nodes_to_append = nodes_to_append def __str__(self) -> str: txt = [] # txt.append("dag_builder=") txt.append(hprint.indent(str(self._dag_builder), 2)) # txt.append("overriding_config=") txt.append(hprint.indent(str(self._overriding_config), 2)) # txt.append("nodes_to_insert=") txt.append(hprint.indent("\n".join(map(str, self._nodes_to_insert)), 2)) # txt.append("nodes_to_append=") txt.append(hprint.indent("\n".join(map(str, self._nodes_to_append)), 2)) # txt = "\n".join(txt) return txt def get_config_template(self) -> cconfig.Config: config = self._dag_builder.get_config_template() config.update(self._overriding_config) return config def _get_dag( self, config: cconfig.Config, mode: str = "strict" ) -> dtfcordag.DAG: # Remove the nodes that are in config nested_config_template = self._dag_builder.get_config_template() config_diff = cconfig.Config() for key in config.keys(): if key in nested_config_template: config_diff[key] = config[key] _LOG.debug("# config_diff=\n%s", str(config_diff)) dag = self._dag_builder.get_dag(config_diff, mode=mode) _LOG.debug("# dag=\n%s", str(dag)) # if self._nodes_to_insert: _LOG.debug("# Inserting nodes") # To insert a node we need to to assume that there is a single source node. source_nid = dag.get_unique_source() # TODO(gp): Allow to insert more than one node, if needed. hdbg.dassert_eq(len(self._nodes_to_insert), 1) stage, node_ctor = self._nodes_to_insert[0] _LOG.debug(hprint.to_str("stage node_ctor")) head_nid = self._dag_builder._get_nid(stage) node = node_ctor( head_nid, **config[head_nid].to_dict(), ) dag.add_node(node) dag.connect(head_nid, source_nid) if self._nodes_to_append: _LOG.debug("# Appending nodes") # To append a node we need to to assume that there is a single sink node. sink_nid = dag.get_unique_sink() # TODO(gp): Allow to append more than one node, if needed. 
hdbg.dassert_eq(len(self._nodes_to_append), 1) stage, node_ctor = self._nodes_to_append[0] _LOG.debug(hprint.to_str("stage node_ctor")) tail_nid = self._dag_builder._get_nid(stage) node = node_ctor( tail_nid, **config[tail_nid].to_dict(), ) dag.add_node(node) dag.connect(sink_nid, tail_nid) return dag
python
from distutils.core import setup

setup(name='DefenseLab',
      version='1.0',
      author='Andrew Meserole',
      packages=['DefenseLab', ])
python
#!/usr/bin/python
# _*_coding:utf-8_*_
import sys


# Point class
class Point:
    lng = ''
    lat = ''

    def __init__(self, lng, lat):
        self.lng = lng
        self.lat = lat

    def show(self):
        print(self.lng, "\t", self.lat)


# Use the ray-casting method to decide whether a point lies inside the polygon
def isPointsInPolygons(point, xyset):
    flag = False
    p = point
    length = len(xyset)
    p2 = xyset[length - 1]
    for i in range(0, length):
        p1 = xyset[i]
        # The point coincides with a polygon vertex
        if (p.lng == p1.lng and p.lat == p1.lat) or (p.lng == p2.lng and p.lat == p2.lat):
            return True
        # Check whether the two segment endpoints lie on opposite sides of the ray
        if (p2.lat < p.lat and p1.lat >= p.lat) or (p2.lat >= p.lat and p1.lat < p.lat):
            # X coordinate of the point on the segment with the same Y coordinate as the ray
            if (p2.lat == p1.lat):
                x = (p1.lng + p2.lng) / 2
            else:
                x = p2.lng - (p2.lat - p.lat) * (p2.lng - p1.lng) / (p2.lat - p1.lat)
            # The point lies on an edge of the polygon
            if (x == p.lng):
                return True
            # The ray crosses the polygon boundary
            if (x > p.lng):
                flag = not flag
        p2 = p1
    return flag


def pointcheck():
    # Load the polygon vertices into xyset
    line = '121.42277777778,31.027666666667,121.42797222222,31.016361111111,121.45088888889,31.023666666667,121.44575,31.035027777778'
    line = line.strip(',')
    strList = line.split(',')
    pointslen = len(strList)
    xyset = []
    for i in range(0, pointslen, 2):
        temp = Point(float(strList[i]), float(strList[i + 1]))
        xyset.append(temp)
        temp.show()
    # lxy = '121.42797222222,31.023666666667'.split(',')  # a point inside the polygon
    lxy = '121.42797222222,37.023666666667'.split(',')  # a point outside the polygon
    lx = float(lxy[0])
    ly = float(lxy[1])
    point = Point(lx, ly)
    if isPointsInPolygons(point, xyset):
        return "inside"
    return "outside"


# Call the function
if __name__ == "__main__":
    print(pointcheck())
python
from . import utils from discord.utils import get async def update_admins(guild, bot_log): role_admin = get(guild.roles, name='Админ') role_past_admin = get(guild.roles, name='Бивш Админ') for admin in utils.get_members_with_role(guild, role_admin): await bot_log.send(f'{admin.mention}') await utils.remove_all_roles(admin) await admin.add_roles(role_past_admin) await bot_log.send(f'Добре дошли в клуба {role_past_admin.mention}') async def update_hacktues(guild): role_10 = get(guild.roles, name='10ти клас') role_11 = get(guild.roles, name='11ти клас') role_12 = get(guild.roles, name='12ти клас') hacktues = get(guild.roles, name='HackTUES') alumni = get(guild.roles, name='Завършили') for member in utils.get_members_with_role(guild, hacktues): if role_10 in member.roles: await member.remove_roles(role_10) await member.add_roles(role_11) elif role_11 in member.roles: await member.remove_roles(role_11) await member.add_roles(role_12) elif role_12 in member.roles: await member.remove_roles(role_12) await utils.update_and_dm(member, alumni, True) async def update_students(guild, bot_log): role_08 = get(guild.roles, name='8ми клас') role_09 = get(guild.roles, name='9ти клас') role_10 = get(guild.roles, name='10ти клас') role_11 = get(guild.roles, name='11ти клас') role_12 = get(guild.roles, name='12ти клас') roles = { role_11: role_12, role_10: role_11, role_09: role_10, role_08: role_09 } for old_role, new_role in roles.items(): await utils.update_roles(guild, old_role, new_role) await bot_log.send(f'{old_role.mention}, добре дошли в {new_role.mention}') async def update_alumni(guild): role_12 = get(guild.roles, name='12ти клас') role_alumni = get(guild.roles, name='Завършили') for student in utils.get_members_with_role(guild, role_12): await utils.remove_all_roles(student) await utils.update_and_dm(student, role_alumni, False)
python
from statistics import multimode


def migratoryBirds(arr):
    mode = multimode(arr)
    mode.sort()
    return mode[0]


if __name__ == "__main__":
    arr = [1, 2, 3, 4, 5, 4, 3, 2, 1, 3, 4]
    print(migratoryBirds(arr))
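For comparison, the same selection rule written with collections.Counter: among the most frequently sighted birds, return the smallest type id. The function name is an illustrative addition, not part of the original file.

from collections import Counter


def migratoryBirds_counter(arr):
    counts = Counter(arr)
    # Highest count wins; ties break toward the smaller bird id.
    return max(counts.items(), key=lambda kv: (kv[1], -kv[0]))[0]


assert migratoryBirds_counter([1, 2, 3, 4, 5, 4, 3, 2, 1, 3, 4]) == 3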
python
'''Test configuration constants, functions ...

'''
import subprocess
import os
import unittest

TVM_ROOT_PART = 'may not need'
TVM_SWAP_PART = 'may not need'
TVM_HOSTNAME = 'cworld.local'
TVM_GITREPO_URL = '[email protected]'


def product_topdir():
    '''return the project's top level directory (according to git)

    '''
    topdir = subprocess.check_output(['git', 'rev-parse', '--show-toplevel']
                                     ).decode('utf-8').strip()
    if not os.path.isdir(topdir):
        raise Exception('Not a dir: ' + topdir)
    return topdir


class TestThisModule(unittest.TestCase):

    def setUp(self):
        pass

    def test_product_topdir(self):
        '''verify that product_topdir returns a valid directory

        The .git sub directory is checked for existence
        '''
        topdir = product_topdir()
        self.assertTrue(os.path.isdir(os.path.join(topdir, '.git')),
                        topdir + '/.git is not a directory')


if __name__ == '__main__':
    unittest.main()
python
import pytest

from core import helpers


@pytest.mark.parametrize('path,expected_prefix', (
    ('/', 'en-gb'),
    ('/ar/', 'ar'),
    ('/es/industries/', 'es'),
    ('/zh-hans/industries/', 'zh-hans'),
    ('/de/industries/aerospace/', 'de'),
    ('/fr/industries/free-foods/', 'fr'),
))
def test_get_language_from_prefix(client, path, expected_prefix):
    prefix = helpers.get_language_from_prefix(path)
    assert prefix == expected_prefix


@pytest.mark.parametrize('prefixed_url,exp_url', (
    ('/de/', '/'),
    ('/ar/', '/'),
    ('/es/industries/', '/industries/'),
    ('/zh-hans/industries/', '/industries/'),
    ('/de/industries/aerospace/', '/industries/aerospace/'),
    ('/fr/industries/free-foods/', '/industries/free-foods/'),
    (
        '/es/uk-setup-guide/establish-base-business-uk/',
        '/uk-setup-guide/establish-base-business-uk/'
    ),
))
def test_get_untranslated_url(prefixed_url, exp_url):
    url = helpers.get_untranslated_url(prefixed_url)
    assert url == exp_url
python
from os import path, environ from imgaug import augmenters as iaa from keras import backend as K from keras import optimizers from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau from keras.layers import BatchNormalization, Activation from keras.layers import Input, Conv2D, MaxPooling2D, GlobalAveragePooling2D from keras.models import Model from keras.preprocessing.image import ImageDataGenerator img_width, img_height = 256, 256 channels = 3 input_shape = channels, img_width, img_height if K.image_data_format() == 'channels_first' \ else img_width, img_height, channels train_data_dir = path.join('data', 'train') validation_data_dir = path.join('data', 'validation') nb_train_samples = int(environ.get('TRAINING_SAMPLES', 20)) nb_validation_samples = int(environ.get('VALIDATION_SAMPLES', 20)) batch_size = 16 epochs = 100 input_tensor = Input(shape=input_shape) block1 = BatchNormalization(name='norm_0')(input_tensor) # Block 1 block1 = Conv2D(8, (3, 3), name='conv_11', activation='relu')(block1) block1 = Conv2D(16, (3, 3), name='conv_12', activation='relu')(block1) block1 = Conv2D(32, (3, 3), name='conv_13', activation='relu')(block1) block1 = Conv2D(64, (3, 3), name='conv_14', activation='relu')(block1) block1 = MaxPooling2D(pool_size=(2, 2))(block1) block1 = BatchNormalization(name='norm_1')(block1) block1 = Conv2D(16, 1)(block1) # Block 2 block2 = Conv2D(32, (3, 3), name='conv_21', activation='relu')(block1) block2 = Conv2D(64, (3, 3), name='conv_22', activation='relu')(block2) block2 = Conv2D(64, (3, 3), name='conv_23', activation='relu')(block2) block2 = Conv2D(128, (3, 3), name='conv_24', activation='relu')(block2) block2 = MaxPooling2D(pool_size=(2, 2))(block2) block2 = BatchNormalization(name='norm_2')(block2) block2 = Conv2D(64, 1)(block2) # Block 3 block3 = Conv2D(64, (3, 3), name='conv_31', activation='relu')(block2) block3 = Conv2D(128, (3, 3), name='conv_32', activation='relu')(block3) block3 = Conv2D(128, (3, 3), name='conv_33', activation='relu')(block3) block3 = Conv2D(64, (3, 3), name='conv_34', activation='relu')(block3) block3 = MaxPooling2D(pool_size=(2, 2))(block3) block3 = BatchNormalization(name='norm_3')(block3) # Block 4 block4 = Conv2D(64, (3, 3), name='conv_41', activation='relu')(block3) block4 = Conv2D(32, (3, 3), name='conv_42', activation='relu')(block4) block4 = Conv2D(16, (3, 3), name='conv_43', activation='relu')(block4) block4 = Conv2D(8, (2, 2), name='conv_44', activation='relu')(block4) block4 = MaxPooling2D(pool_size=(2, 2))(block4) block4 = BatchNormalization(name='norm_4')(block4) block4 = Conv2D(2, 1)(block4) block5 = GlobalAveragePooling2D()(block4) output = Activation('softmax')(block5) model = Model(inputs=[input_tensor], outputs=[output]) model.summary() model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False), metrics=['accuracy']) # Initiate the train and test generators with data Augmentation sometimes = lambda aug: iaa.Sometimes(0.6, aug) seq = iaa.Sequential([ iaa.GaussianBlur(sigma=(0, 1.0)), iaa.Sharpen(alpha=1, lightness=0), iaa.CoarseDropout(p=0.1, size_percent=0.15), sometimes(iaa.Affine( scale={'x': (0.8, 1.2), 'y': (0.8, 1.2)}, translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)}, rotate=(-30, 30), shear=(-16, 16))) ]) train_datagen = ImageDataGenerator( rescale=1. / 255, preprocessing_function=seq.augment_image, horizontal_flip=True, vertical_flip=True) test_datagen = ImageDataGenerator( rescale=1. 
/ 255, horizontal_flip=True, vertical_flip=True) train_generator = train_datagen.flow_from_directory( train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='categorical') validation_generator = test_datagen.flow_from_directory( validation_data_dir, target_size=(img_height, img_width), class_mode='categorical') checkpoint = ModelCheckpoint('f1.h5', monitor='acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1) reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2, verbose=0, mode='auto', cooldown=0, min_lr=0) model.fit_generator( train_generator, steps_per_epoch=nb_train_samples // batch_size, epochs=epochs, validation_data=validation_generator, validation_steps=nb_validation_samples // batch_size, callbacks=[checkpoint, reduce_lr] )
python
# mb, 2012-05-26, 2013-02-28 import os import sys import subprocess import shutil from datetime import datetime ospj = os.path.join dest_path_to_extensions = '/home/mbless/public_html/TYPO3/extensions' tempdir = '/home/mbless/HTDOCS/render-ter-extensions/temp' proceeding = True stats = {} def walk_ter_extensions_index_html(rootfolder, f2=sys.stdout): prelpath = len(rootfolder) proceeding = True for path, dirs, files in os.walk(rootfolder): proceedwithfile = True destdir = path if not proceeding: dirs[:] = [] else: if os.path.exists(os.path.join(path, 'manual-is-not-available.txt')): stats['manual-is-not-available.txt'] = stats.get('manual-is-not-available.txt', 0) + 1 else: for afile in ['index.html', 'manual.sxw', 'manual.html', 'manual.rst']: if os.path.exists(os.path.join(path, afile)): stats[afile] = stats.get(afile, 0) + 1 for afile in files: leaf = os.path.split(path)[1] vsplitted = leaf.split('.') if afile.lower() == 'index.html' and (leaf=='latest' or len(vsplitted) == 3): if leaf == 'latest': vsplitted = ['999','999','999'] try: vsplitted = [int(v) for v in vsplitted] skip = False except ValueError: skip = True if skip: continue left, version = os.path.split(path) left, extkey = os.path.split(left) v1, v2, v3 = vsplitted f2.write(extkey + ',%05d.'%v1 + '%05d.'%v2 + '%05d'%v3 + ',' + version + '\n') document_part_1 = """\ <?xml version="1.0" encoding="utf-8" ?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Extensions</title> <link rel="stylesheet" href="https://docs.typo3.org/css/typo3_docutils_styles.css" type="text/css" /> </head> <body> <div class="document"> """ document_part_2 = """\ </div> </body> </html> """ def main( timestr=None): tempfile = ospj(tempdir, 'tempfile-ter-manuals-1.txt') f2 = file(tempfile,'w') walk_ter_extensions_index_html(dest_path_to_extensions, f2) f2.close() f1 = file(ospj(tempdir, 'tempfile-ter-manuals-1.txt')) f2 = file(ospj(tempdir, 'tempfile-ter-manuals-2.txt'), 'w') subprocess.call('sort', stdin=f1, stdout=f2) f1.close() f2.close() extkey0 = None version0 = None firstletter0 = None firstletter00 = '' cntlines = 0 cntlinks = 0 f1 = file(ospj(tempdir, 'tempfile-ter-manuals-2.txt')) f2 = file(ospj(tempdir, 'tempfile-ter-manuals-3-index.html'), 'w') f2.write(document_part_1) if timestr is None: timestr = str(datetime.now())[:19] f2.write('<pre>') f2.write(timestr) f2.write(" updated every 2 hours at HH:10\n") f2.write('</pre>\n') else: f2.write('<pre>') f2.write("This list reflects extensions.xml.gz %s\n" % timestr) f2.write("Updated every 2 hours at HH:10\n") f2.write('</pre>\n') #f2.write('<p>' # 'The links will open in a second window. I you arrange the two windows ' # 'side by side you can click an extension in this window and ' # 'immediately read in the other.</p>' #) if timestr < '2012-12-30 16:00:00': f2.write('<p><b>' "Due to the way TER works it may take " 'up to a day until new manuals appear.' '</b></p>' ) if timestr < '2011-12-30 16:00:00': f2.write('<p><b>' "http://typo3.org doesn\'t hand out new 'manual.sxw' files at the moment. " 'So we are not getting any updates at the moment. This will be repaired ' 'once typo3.org works again. 
~Martin,&nbsp;2012-05-21&nbsp;18:35' '</b></p>' ) for line in f1: cntlines += 1 extkey, dummy, version = line.strip().split(',') firstletter = extkey[0] if not extkey0 is None: if firstletter0 != firstletter00: f2.write('<br /><br /><b>%s</b><br />\n' % firstletter0) firstletter00 = firstletter0 if extkey != extkey0: f2.write('<a href="%s/%s/" title="%s %s" >%s</a><br />\n' % (extkey0, version0, extkey0, version0, extkey0)) cntlinks += 1 firstletter0 = firstletter extkey0 = extkey version0 = version if not extkey0 is None: if firstletter0 != firstletter00: f2.write('<br /><br /><b>%s</b><br />\n' % firstletter0) firstletter00 = firstletter0 f2.write('<a href="%s/%s/" title="%s %s" >%s</a>\n' % (extkey0, version0, extkey0, version0, extkey0)) f2.write('<pre>\n') f2.write('%s\n\n' % (str(datetime.now())[:19])) f2.write('Available:\n') f2.write('\n') f2.write('%6d links on this page to different extensions.\n' % cntlinks) f2.write(' The links point to the latest version which has an index.html\n') f2.write('\n') f2.write('%6d with manual.sxw (made by extension author)\n' % stats['manual.sxw']) f2.write('%6d with manual.html (made from manual.sxw)\n' % stats['manual.html']) f2.write('%6d with manual.rst (made from manual.html)\n' % stats['manual.rst']) f2.write('%6d with index.html (made from manual.rst)\n' % stats['index.html']) f2.write('\n') f2.write("%6d don't have a manual at http://typo3.org/extension-manuals/EXTKEY/VERSION/sxw/?no_cache=1\n" % stats['manual-is-not-available.txt']) f2.write('</pre>') f2.write(document_part_2) f2.close() if (0): # moved this functionality to the caller to make everything more "atomic" srcfile = ospj(tempdir, 'tempfile-ter-manuals-3-index.html') destfile = os.path.join(dest_path_to_extensions, 'index.html') shutil.copyfile(srcfile, destfile) if __name__ == "__main__": main()
python
def test(i):
    print("test", i)


def add_test(mf):
    def add_test_print(i):
        print("added to test", i)

    mf.register_event("test", add_test_print, unique=False)


def main(event):
    event.test(0)
    event.add_test()
    event.test(1)


def register(mf):
    mf.register_event("test", test, unique=False)
    mf.register_event("add_test", add_test, unique=False)
    mf.register_event("main", main, unique=False)
python
""" Entendendo Interadores e Iteraveis #Interador - Um objeto que poder ser iterado - Um objeto que retorna um dado, sendo um elemento por vez quando uma função next() é chamada; #Interaveis - Um objeto que irá retorna um interator quando inter() for chamada. """
python
from infosystem.common.subsystem import router


class Router(router.Router):

    def __init__(self, collection, routes=[]):
        super().__init__(collection, routes)

    @property
    def routes(self):
        # TODO(samueldmq): is this the best way to re-write the defaults to
        # only change bypass=true for create ?
        return [
            {
                'action': 'create',
                'method': 'POST',
                'url': self.collection_url,
                'callback': 'create',
                'bypass': True
            },
            {
                'action': 'get',
                'method': 'GET',
                'url': self.resource_url,
                'callback': 'get'
            },
            {
                'action': 'delete',
                'method': 'DELETE',
                'url': self.resource_url,
                'callback': 'delete'
            }
        ]
python
from ubikagent import Project from ubikagent.introspection import get_methods class DummyAgent: """Test class needed by `InstantiableProject` and `TestProject`.""" pass class NonInstantiableProject(Project): """Test class needed by `TestProject`.""" pass class InstantiableProject(Project): """Test class needed by `TestProject` and `TestIntrospection`.""" ENV_ID = 'test-v0' AGENT_CLASS = DummyAgent def no_args(self): pass def pos_arg(self, argument): pass def pos_arg_with_explicit_type(self, argument: int): pass def kwarg_with_implicit_int_type(self, argument=1): pass def kwarg_with_default_none(self, argument=None): pass def kwarg_with_explicit_int_type(self, argument: int = 1): pass def kwarg_with_implicit_bool_type(self, argument=True): pass def kwarg_with_implicit_string_type(self, argument='a_string'): pass class TestIntrospection: """Tests reading methods and arguments from `Project` and its subclasses to be used to generate command line help.""" def setup_class(cls): cls.instance = InstantiableProject() cls.methods = get_methods(cls.instance) def test_project_method_without_args(self): method_name = 'no_args' argument = self.methods[method_name] assert argument == [] def test_project_method_with_an_arg(self): method_name = 'pos_arg' expected_name = 'argument' expected_kwarg = False expected_default = None expected_type = None expected_doc = None arguments = self.methods[method_name] first_argument = arguments[0] assert first_argument == (expected_name, expected_kwarg, expected_default, expected_type, expected_doc) def test_project_method_with_an_arg_with_explicit_type(self): method_name = 'pos_arg_with_explicit_type' expected_name = 'argument' expected_default = None expected_type = int arguments = self.methods[method_name] first_argument = arguments[0] argument_name, is_kwarg, argument_default, argument_type, _ = first_argument assert argument_name == expected_name assert is_kwarg is False assert argument_default == expected_default assert argument_type == expected_type def test_project_method_default_none(self): method_name = 'kwarg_with_default_none' expected_name = 'argument' expected_default = None expected_type = None arguments = self.methods[method_name] first_argument = arguments[0] argument_name, is_kwarg, argument_default, argument_type, _ = first_argument assert argument_name == expected_name assert is_kwarg is True assert argument_default == expected_default assert argument_type == expected_type def test_project_method_with_int_default(self): method_name = 'kwarg_with_implicit_int_type' expected_name = 'argument' expected_default = 1 expected_type = int arguments = self.methods[method_name] first_argument = arguments[0] argument_name, is_kwarg, argument_default, argument_type, _ = first_argument assert argument_name == expected_name assert is_kwarg is True assert argument_default == expected_default assert argument_type == expected_type def test_project_method_with_int_type(self): method_name = 'kwarg_with_explicit_int_type' expected_name = 'argument' expected_default = 1 expected_type = int expected_doc = None arguments = self.methods[method_name] first_argument = arguments[0] argument_name, is_kwarg, argument_default, argument_type, _ = first_argument assert argument_name == expected_name assert is_kwarg is True assert argument_default == expected_default assert argument_type == expected_type def test_project_method_with_bool_default(self): method_name = 'kwarg_with_implicit_bool_type' expected_name = 'argument' expected_default = True expected_type = bool expected_doc 
= None arguments = self.methods[method_name] first_argument = arguments[0] argument_name, is_kwarg, argument_default, argument_type, _ = first_argument assert argument_name == expected_name assert is_kwarg is True assert argument_default == expected_default assert argument_type == expected_type class TestProject: """Tests instantiating a `Project`.""" def test_instantiating_project(self): instance = InstantiableProject() def test_instantiating_project_without_variables_fails(self): try: instance = NonInstantiableProject() except Exception: pass else: raise AssertionError( "Instantiating did not raise exception when it should have")
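# Hedged sketch (not part of the original test module): the class docstring above
# says the introspected methods are meant to drive command-line help. Assuming the
# (name, is_kwarg, default, type, doc) tuple layout asserted in `TestIntrospection`,
# one plausible way to turn `get_methods()` output into an argparse CLI looks like
# this; `build_parser` and all flag names are illustrative assumptions.
import argparse

def build_parser(methods):
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='command')
    for method_name, arguments in methods.items():
        sub = subparsers.add_parser(method_name)
        for name, is_kwarg, default, arg_type, doc in arguments:
            if is_kwarg:
                # keyword arguments become optional flags carrying their defaults
                sub.add_argument('--' + name, type=arg_type or str, default=default, help=doc)
            else:
                # positional arguments stay required
                sub.add_argument(name, type=arg_type or str, help=doc)
    return parser

# Example: build_parser(get_methods(InstantiableProject())) would expose a
# `kwarg_with_explicit_int_type` subcommand with an optional `--argument` flag.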
# coding:utf-8
import threading
import redlock


class Locker(object):

    def __init__(self, resource, ttl=0,
                 servers=[{"host": "localhost", "port": 6379, "db": 0}, ]):
        self.servers = servers
        self.resource = resource
        self.ttl = ttl
        self.dlm = None
        self.r = None

    def lock(self):
        self.dlm = redlock.Redlock(self.servers)
        self.r = self.dlm.lock(self.resource, self.ttl)
        if not self.r:
            return False
        return True

    def unlock(self):
        self.dlm.unlock(self.r)


# import time
# lock = redlock.RedLock("distributed_lock",
#                        connection_details=[
#                            {'host':'172.16.109.1','port':6379,'db':0}
#                        ])
#
# lock.acquire()
# print 'enter lock...'
# time.sleep(10000)
# lock.release()
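# Hedged usage sketch for the Locker wrapper above; the resource name, TTL (in
# milliseconds, per redlock) and the localhost Redis address are illustrative
# assumptions rather than values taken from the source.
if __name__ == "__main__":
    locker = Locker("my-shared-resource", ttl=10000,
                    servers=[{"host": "localhost", "port": 6379, "db": 0}])
    if locker.lock():
        try:
            pass  # critical section: only the lock holder runs this
        finally:
            locker.unlock()
    else:
        print("could not acquire the distributed lock")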
#!/usr/bin/env python

# Copyright 2020 MaaT Pharma
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#############
# This script extracts the sequences with a length greater than or equal to
# a length threshold from a FASTA file.
# python filter_FASTA_by_seq_length.py in.fasta out.fasta 1000
#############

from Bio import SeqIO
import sys, os

if len(sys.argv) == 4:
    fasta_file = sys.argv[1]
    output_file = sys.argv[2]
    length = int(sys.argv[3])

    output_file = open(output_file, "w")

    if os.path.isfile(fasta_file):
        with open(fasta_file, 'r') as ff:
            for seq_record in SeqIO.parse(ff, "fasta"):
                seq_length = len(seq_record.seq) - seq_record.seq.count("N")
                if seq_length >= length:
                    SeqIO.write(seq_record, output_file, "fasta")

    output_file.close()
import logging from io import BytesIO from datetime import datetime, timezone from kermes_infra.mail import MailService from kermes_infra.repositories import FileRepository, UserRepository, EBookRepository from kermes_infra.queues import SQSConsumer from kermes_infra.messages import DeliverEBookMessage, CleanUpMessage class Postmaster: def __init__( self, user_repository: UserRepository, ebook_repository: EBookRepository, file_repository: FileRepository, mail_service: MailService, housekeeper_queue_producer: SQSConsumer, logger: logging.Logger, ) -> None: self.user_repository = user_repository self.ebook_repository = ebook_repository self.file_repository = file_repository self.mail_service = mail_service self.housekeeper_queue_producer = housekeeper_queue_producer self.logger = logger def process_message(self, message_json: str) -> bool: self.logger.debug(f"processing message {message_json}") # parse the message deliver_msg = DeliverEBookMessage.from_json(message_json) # fetch the user record user = self.user_repository.get(deliver_msg.user_id) if user is None: self.logger.error(f"couldn't fetch user with id {deliver_msg.user_id}") return False # fetch the ebook record ebook = self.ebook_repository.get(user.user_id, deliver_msg.ebook_id) if ebook is None: self.logger.error(f"couldn't fetch ebook with id {deliver_msg.ebook_id} for user {user.user_id}") return False # fetch the ebook file from S3 content_key = ebook.kindle_content_key if user.prefer_kindle else ebook.content_key ebook_content = self.file_repository.get(content_key) if ebook_content is None: self.logger.error(f"couldn't fetch ebook content for key {content_key}") return False # send the ebook message attachment_filename = "ebook.mobi" if user.prefer_kindle else "ebook.epub" if not self.mail_service.send_message( user.prefer_kindle, user.delivery_email, "Kermes delivery!", "This is your ebook!", BytesIO(ebook_content.read()), attachment_filename, ): self.logger.error(f"couldn't deliver ebook {ebook.ebook_id} for user {user.user_id}") return False self.housekeeper_queue_producer.send_message(CleanUpMessage(user.user_id, ebook.ebook_id).to_json()) ebook.sent = True ebook.sent_date = datetime.now(tz=timezone.utc) if not self.ebook_repository.put(ebook): self.logger.error(f"couldn't update ebook {ebook.ebook_id} with sent status") return False return True
# values_from_literature.py (flowsa) # !/usr/bin/env python3 # coding=utf-8 """ Values from the literature used for data allocation are specified here and can be called on using functions. """ import pandas as pd import numpy as np from flowsa.common import datapath def get_US_urban_green_space_and_public_parks_ratio(): """ calculates weighted average of urban green space and public parks in national total urban areas Based on weighted average of 44 cities based on city population. weighted average value = 12.35% Larson LR, Jennings V, Cloutier SA (2016) Public Parks and Wellbeing in Urban Areas of the United States. PLoS ONE 11(4): e0153211. https://doi.org/10.1371/journal.pone.0153211 """ # load Larson's saved SI data df = pd.read_csv(datapath + "Larson_UrbanPublicParks_SI.csv") # calculate a weighted value for ratio of urban land # that belongs to parks based on city populations # weighted average function try: wm = lambda x: np.ma.average(x, weights=df.loc[x.index, "CityPop2010"]) except ZeroDivisionError: wm = 0 # column to weight agg_funx = {"ParkPercent-2014": wm} # weighted averages as value value_series = df.agg(agg_funx) value = value_series[0] return value def get_Canadian_to_USD_exchange_rate(year): """ Return exchange rate (Canadian $/USD) From https://www.federalreserve.gov/releases/h10/current/ on 09/07/2020 :param year: :return: """ er = ({'2000': '1.4855', '2001': '1.5487', '2002': '1.5704', '2003': '1.4008', '2004': '1.3017', '2005': '1.2115', '2006': '1.134', '2007': '1.0734', '2008': '1.066', '2009': '1.1412', '2010': '1.0298', '2011': '0.9887', '2012': '0.9995', '2013': '1.03', '2014': '1.1043', '2015': '1.2791', '2016': '1.3243', '2017': '1.2984', '2018': '1.2957', '2019': '1.3269' }) exchange_rate = er.get(year) return exchange_rate def get_area_of_urban_land_occupied_by_houses_2013(): """ Reported area of urban land occupied by houses in 2013 from the USDA ERS Major Land Uses Report :return: """ acres_to_sq_m_conversion = 4046.86 # value originally reported in million acres area_urban_residence = 32.8 # convert to square meters area_urban_residence = area_urban_residence * 1000000 * acres_to_sq_m_conversion return area_urban_residence def get_area_of_rural_land_occupied_by_houses_2013(): """ Reported area of urban land occupied by houses in 2013 from the USDA ERS Major Land Uses Report :return: """ acres_to_sq_m_conversion = 4046.86 # value originally reported in million acres area_rural_residence = 106.3 # convert to square meters area_rural_residence = area_rural_residence * 1000000 * acres_to_sq_m_conversion return area_rural_residence def get_commercial_and_manufacturing_floorspace_to_land_area_ratio(): """ The additional land area associated with commercial and manufacturing buildings (parking, sinage, landscaping) Based on original USEEIO assumption :return: ratio of land area to total floorspace assumption """ floor_space_to_land_area_ratio = 0.25 return floor_space_to_land_area_ratio def get_open_space_fraction_of_urban_area(): """ Assumption on the fraction of urban areas that is open space Based on Lin Zeng's 2020 paper :return: fraction of open space in urban areas """ value = 0.1 return value def get_urban_land_use_for_airports(): """ Based on Lin Zeng's 2020 paper :return: """ value = 0.05 return value def get_urban_land_use_for_railroads(): """ Based on Lin Zeng's 2020 paper :return: """ value = 0.05 return value def get_fraction_of_urban_local_road_area_for_parking(): """ Based on Lin Zeng's 2020 paper :return: """ value = 0.25 return value def 
get_transportation_sectors_based_on_FHA_fees(): """ Values from https://www.fhwa.dot.gov/policy/hcas/addendum.cfm Website accessed 11/02/2020 Data from 1997 :return: """ fha_dict = ({'Truck transportation': {'NAICS_2012_Code': '484', 'ShareOfFees': 0.329}, 'Transit and ground passenger transportation': {'NAICS_2012_Code': '485', 'ShareOfFees': 0.001}, 'State and local government passenger transit': {'NAICS_2012_Code': 'S00201', 'ShareOfFees': 0.001}, 'Personal consumption expenditures': {'NAICS_2012_Code': 'F01000', 'ShareOfFees': 0.669} }) return fha_dict
""" sentry.plugins.base.v2 ~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import, print_function __all__ = ('Plugin2',) import logging from django.http import HttpResponseRedirect from threading import local from sentry.plugins.base.response import Response class PluginMount(type): def __new__(cls, name, bases, attrs): new_cls = type.__new__(cls, name, bases, attrs) if IPlugin2 in bases: return new_cls if not new_cls.title: new_cls.title = new_cls.__name__ if not new_cls.slug: new_cls.slug = new_cls.title.replace(' ', '-').lower() if not hasattr(new_cls, 'logger'): new_cls.logger = logging.getLogger('sentry.plugins.%s' % (new_cls.slug,)) return new_cls class IPlugin2(local): """ Plugin interface. Should not be inherited from directly. A plugin should be treated as if it were a singleton. The owner does not control when or how the plugin gets instantiated, nor is it guaranteed that it will happen, or happen more than once. >>> from sentry.plugins import Plugin2 >>> >>> class MyPlugin(Plugin2): >>> def get_title(self): >>> return 'My Plugin' As a general rule all inherited methods should allow ``**kwargs`` to ensure ease of future compatibility. """ # Generic plugin information title = None slug = None description = None version = None author = None author_url = None resource_links = () # Configuration specifics conf_key = None conf_title = None project_conf_form = None project_conf_template = 'sentry/plugins/project_configuration.html' # Global enabled state enabled = True can_disable = True # Should this plugin be enabled by default for projects? project_default_enabled = False def _get_option_key(self, key): return '%s:%s' % (self.get_conf_key(), key) def is_enabled(self, project=None): """ Returns a boolean representing if this plugin is enabled. If ``project`` is passed, it will limit the scope to that project. >>> plugin.is_enabled() """ if not self.enabled: return False if not self.can_disable: return True if not self.can_enable_for_projects(): return True if project: project_enabled = self.get_option('enabled', project) if project_enabled is not None: return project_enabled else: return self.project_default_enabled return True def reset_options(self, project=None, user=None): from .helpers import reset_options return reset_options(self.get_conf_key(), project, user) def get_option(self, key, project=None, user=None): """ Returns the value of an option in your plugins keyspace, or ``None`` if one is not present. If ``project`` is passed, it will limit the scope to that project's keyspace. >>> value = plugin.get_option('my_option') """ from sentry.plugins.helpers import get_option return get_option(self._get_option_key(key), project, user) def set_option(self, key, value, project=None, user=None): """ Updates the value of an option in your plugins keyspace. If ``project`` is passed, it will limit the scope to that project's keyspace. >>> plugin.set_option('my_option', 'http://example.com') """ from sentry.plugins.helpers import set_option return set_option(self._get_option_key(key), value, project, user) def unset_option(self, key, project=None, user=None): """ Removes an option in your plugins keyspace. If ``project`` is passed, it will limit the scope to that project's keyspace. 
>>> plugin.unset_option('my_option') """ from sentry.plugins.helpers import unset_option return unset_option(self._get_option_key(key), project, user) def get_conf_key(self): """ Returns a string representing the configuration keyspace prefix for this plugin. """ if not self.conf_key: return self.get_conf_title().lower().replace(' ', '_') return self.conf_key def get_conf_title(self): """ Returns a string representing the title to be shown on the configuration page. """ return self.conf_title or self.get_title() def has_project_conf(self): return self.project_conf_form is not None def can_enable_for_projects(self): """ Returns a boolean describing whether this plugin can be enabled on a per project basis """ return True # Response methods def redirect(self, url): """ Returns a redirect response type. """ return HttpResponseRedirect(url) def render(self, template, context=None): """ Given a template name, and an optional context (dictionary), returns a ready-to-render response. Default context includes the plugin instance. >>> plugin.render('template.html', {'hello': 'world'}) """ if context is None: context = {} context['plugin'] = self return Response(template, context) # The following methods are specific to web requests def get_title(self): """ Returns the general title for this plugin. >>> plugin.get_title() """ return self.title def get_description(self): """ Returns the description for this plugin. This is shown on the plugin configuration page. >>> plugin.get_description() """ return self.description def get_resource_links(self): """ Returns a list of tuples pointing to various resources for this plugin. >>> def get_resource_links(self): >>> return [ >>> ('Documentation', 'http://sentry.readthedocs.org'), >>> ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'), >>> ('Source', 'https://github.com/getsentry/sentry'), >>> ] """ return self.resource_links def get_rules(self, **kwargs): """ Return a list of Rule classes to add to the registry. >>> def get_rules(self, **kwargs): >>> return [MyCustomRule] """ return [] def get_actions(self, request, group, **kwargs): """ Return a list of available actions to append this aggregate. Examples of built-in actions are "Mute Event" and "Remove Data". An action is a tuple containing two elements: ('Action Label', '/uri/to/action/') >>> def get_actions(self, request, group, **kwargs): >>> return [('Google', 'http://google.com')] """ return [] def get_annotations(self, request, group, **kwargs): """ Return a list of annotations to append to this aggregate. An example of an annotation might be "Needs Fix" or "Task #123". The properties of each tag must match the constructor for :class:`sentry.plugins.Annotation` >>> def get_annotations(self, request, group, **kwargs): >>> task_id = GroupMeta.objects.get_value(group, 'myplugin:tid') >>> if not task_id: >>> return [] >>> return [{'label': '#%s' % (task_id,)}] """ return [] def get_notifiers(self, **kwargs): """ Return a list of notifiers to append to the registry. Notifiers must extend :class:`sentry.plugins.Notifier`. >>> def get_notifiers(self, **kwargs): >>> return [MyNotifier] """ return [] def get_tags(self, event, **kwargs): """ Return a list of additional tags to add to this instance. A tag is a tuple containing two elements: ('tag-key', 'tag-value') >>> def get_tags(self, event, **kwargs): >>> return [('tag-key', 'tag-value')] """ return [] def get_event_preprocessors(self, **kwargs): """ Return a list of preprocessors to apply to the given event. 
A preprocessor is a function that takes the normalized data blob as an input and returns modified data as output. If no changes to the data are made it is safe to return ``None``. >>> def get_event_preprocessors(self, **kwargs): >>> return [lambda x: x] """ return [] def get_feature_hooks(self, **kwargs): """ Return a list of callables to check for feature status. >>> from sentry.features import FeatureHandler >>> >>> class NoRegistration(FeatureHandler): >>> features = set(['auth:register']) >>> >>> def has(self, feature, actor): >>> return False >>> def get_feature_hooks(self, **kwargs): >>> return [NoRegistration()] """ return [] class Plugin2(IPlugin2): """ A plugin should be treated as if it were a singleton. The owner does not control when or how the plugin gets instantiated, nor is it guaranteed that it will happen, or happen more than once. """ __version__ = 2 __metaclass__ = PluginMount
# Copyright (C) 2019 Analog Devices, Inc. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # - Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # - Neither the name of Analog Devices, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # - The use of this software may or may not infringe the patent rights # of one or more patent holders. This license does not release you # from the requirement that you obtain separate licenses from these # patent holders to use this software. # - Use of the software either in source or binary form, must be run # on or directly connected to an Analog Devices Inc. component. # # THIS SOFTWARE IS PROVIDED BY ANALOG DEVICES "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. # # IN NO EVENT SHALL ANALOG DEVICES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, INTELLECTUAL PROPERTY # RIGHTS, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import adi import matplotlib.pyplot as plt import numpy as np from scipy import signal import time # Create radio sdr = adi.Pluto() # Configure properties sdr.rx_rf_bandwidth = 4000000 sdr.rx_lo = 2000000000 sdr.tx_lo = 2000000000 sdr.tx_cyclic_buffer = True sdr.tx_hardwaregain = -30 sdr.gain_control_mode = 'slow_attack' # Read properties print("RX LO %s" % (sdr.rx_lo)) # Create a sinewave waveform fs = int(sdr.sample_rate) fc = 3000000 N = 1024 ts = 1/float(fs) t = np.arange(0, N*ts, ts) i = np.cos(2*np.pi*t*fc) * 2**14 q = np.sin(2*np.pi*t*fc) * 2**14 iq = i + 1j*q # Send data sdr.tx(iq) # Collect data for r in range(20): x = sdr.rx() f, Pxx_den = signal.periodogram(x, fs) plt.clf() plt.semilogy(f, Pxx_den) plt.ylim([1e-7, 1e2]) plt.xlabel('frequency [Hz]') plt.ylabel('PSD [V**2/Hz]') plt.draw() plt.pause(0.05) time.sleep(0.1) plt.show()
# Dindo Bot # Copyright (c) 2018 - 2019 AXeL from lib.shared import LogType, DebugLevel from lib import tools, parser from .job import JobThread class BotThread(JobThread): def __init__(self, parent, game_location, start_from_step, repeat_path, account_id, disconnect_after): JobThread.__init__(self, parent, game_location) self.start_from_step = start_from_step self.repeat_path = repeat_path self.account_id = account_id self.disconnect_after = disconnect_after self.exit_game = parent.settings['Account']['ExitGame'] def run(self): self.start_timer() self.debug('Bot thread started', DebugLevel.Low) # connect to account account_connected = False if self.account_id is not None: self.debug('Connect to account (account_id: %s)' % self.account_id) self.connect(self.account_id) account_connected = True # check for pause self.pause_event.wait() # get instructions & interpret them if not self.suspend: self.debug('Bot path: %s, repeat: %d' % (self.parent.bot_path, self.repeat_path)) if self.parent.bot_path: instructions = tools.read_file(self.parent.bot_path) repeat_count = 0 while repeat_count < self.repeat_path: # check for pause or suspend self.pause_event.wait() if self.suspend: break # start interpretation self.interpret(instructions) repeat_count += 1 # tell user that we have complete the path if not self.suspend: self.log('Bot path completed', LogType.Success) if not self.suspend: # disconnect account if account_connected and self.disconnect_after: self.debug('Disconnect account') self.disconnect(self.exit_game) # reset bot window buttons self.reset() self.debug('Bot thread ended, elapsed time: ' + self.get_elapsed_time(), DebugLevel.Low) def interpret(self, instructions): # split instructions lines = instructions.splitlines() # ignore instructions before start step if self.start_from_step > 1 and self.start_from_step <= len(lines): self.debug('Start from step: %d' % self.start_from_step) step = self.start_from_step - 1 lines = lines[step:] for i, line in enumerate(lines, start=1): # check for pause or suspend self.pause_event.wait() if self.suspend: break # parse instruction self.debug('Instruction (%d): %s' % (i, line), DebugLevel.Low) instruction = parser.parse_instruction(line) self.debug('Parse result: ' + str(instruction), DebugLevel.High) # begin interpretation if instruction['name'] == 'Move': self.move(instruction['value']) elif instruction['name'] == 'Enclos': self.check_enclos(instruction['location'], instruction['type']) elif instruction['name'] == 'Zaap': self.use_zaap(instruction['from'], instruction['to']) elif instruction['name'] == 'Zaapi': self.use_zaapi(instruction['from'], instruction['to']) elif instruction['name'] == 'Collect': self.collect(instruction['map'], instruction['store_path']) elif instruction['name'] == 'Click': coordinates = ( int(instruction['x']), int(instruction['y']), int(instruction['width']), int(instruction['height']) ) if instruction['twice'] == 'True': self.double_click(coordinates) else: self.click(coordinates) elif instruction['name'] == 'Wait': if instruction['pause'] == 'True': self.wait() elif instruction['duration'].isdigit(): self.sleep(int(instruction['duration'])) elif instruction['name'] == 'PressKey': self.press_key(instruction['value']) elif instruction['name'] == 'TypeText': self.type_text(instruction['value']) elif instruction['name'] == 'Connect': if instruction['account_id'].isdigit(): account_id = int(instruction['account_id']) else: account_id = instruction['account_id'] self.connect(account_id) elif instruction['name'] == 
'Disconnect': self.disconnect(instruction['value']) else: self.debug('Unknown instruction', DebugLevel.Low)
class ForeignCountry:

    def __init__(self, code):
        self.code = code
        self.name = "Paese Estero"
import json import pytest from tests.unit.resources import searched_observable from trustar2.models.searched_observable import SearchedObservable from trustar2.trustar_enums import ObservableTypes VALUE = "2.2.2.2" TYPE = ObservableTypes.IP4.value FIRST_SEEN = 1623273177255 LAST_SEEN = 1623701072520 ENCLAVE_GUIDS = ["test-enclave-guid"] TAGS = ["test-tag"] @pytest.fixture def searched_observable_json(): return json.loads(searched_observable) @pytest.fixture def searched_observable_obj(): return SearchedObservable( value=VALUE, type=TYPE, first_seen=FIRST_SEEN, last_seen=LAST_SEEN, enclave_guids=ENCLAVE_GUIDS, tags=TAGS ) def test_searched_observable_deserialization(searched_observable_json): searched_observable = SearchedObservable.from_dict(searched_observable_json) assert searched_observable.value == VALUE assert searched_observable.type == TYPE assert searched_observable.first_seen == FIRST_SEEN assert searched_observable.last_seen == LAST_SEEN assert searched_observable.enclave_guids == ENCLAVE_GUIDS assert searched_observable.tags == TAGS def test_searched_observable_serialization(searched_observable_obj, searched_observable_json): assert searched_observable_obj.serialize() == searched_observable_json def test_searched_observable_repr(searched_observable_obj): assert searched_observable_obj.__repr__() == "SearchedObservable(type=IP4, value=2.2.2.2)"
#!/usr/bin/env python

# BSD 3-Clause License; see https://github.com/scikit-hep/uproot3/blob/master/LICENSE
import ipywidgets as widgets

a = widgets.IntText(description='Value A')
b = widgets.IntSlider(description='Value B')

vbox = widgets.VBox(children=[a, b])
vbox
""" Char. number range | UTF-8 octet sequence (hexadecimal) | (binary) --------------------+--------------------------------------------- 0000 0000-0000 007F | 0xxxxxxx 0000 0080-0000 07FF | 110xxxxx 10xxxxxx 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx """ class Solution(object): def validUtf8(self, data): """ :type data: List[int] :rtype: bool """ i = 0 datalen = len(data) try: while i < datalen: b0 = data[i] if b0 <= 0b01111111: # 0xxxxxxx i += 1 elif b0 <= 0b11011111: if not (0b10000000 <= data[i+1] <= 0b10111111): return False i += 2 elif b0 <= 0b11101111: if not (0b10000000 <= data[i+1] <= 0b10111111): return False if not (0b10000000 <= data[i+2] <= 0b10111111): return False i += 3 elif b0 <= 0b11110111: if not (0b10000000 <= data[i+1] <= 0b10111111): return False if not (0b10000000 <= data[i+2] <= 0b10111111): return False if not (0b10000000 <= data[i+3] <= 0b10111111): return False i += 4 else: return False except IndexError: return False return i == datalen print Solution().validUtf8([]) print Solution().validUtf8([197, 130, 1]) print Solution().validUtf8([235, 140, 4]) print Solution().validUtf8([206,210,189,208,197,163,182,171,212,243,10,0,10])
from fastapi import FastAPI, status from pydantic import BaseModel, ValidationError from requests_html import HTMLSession from starlette.middleware.cors import CORSMiddleware from starlette.responses import JSONResponse session = HTMLSession() app = FastAPI( title="corona virus real time data", description="", version="0.3.0", docs_url="/docs", redoc_url="/redoc", openapi_url="/openapi.json", ) app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) def get_data(*, country: str): respond = session.get("https://www.worldometers.info/coronavirus/") tbody = respond.html.find("tbody", first=True) trs = tbody.find("tr") data = {} for tr in trs: if f"{country}" in tr.text.lower(): tds = tr.find("td") country = 0 if tds[1].text == "" else tds[1].text total_case = 0 if tds[2].text == "" else tds[2].text new_case = 0 if tds[3].text == "" else tds[3].text total_death = 0 if tds[4].text == "" else tds[4].text new_death = 0 if tds[5].text == "" else tds[5].text total_recovered = 0 if tds[6].text == "" else tds[6].text new_recovered = 0 if tds[7].text == "" else tds[7].text active_case = 0 if tds[8].text == "" else tds[8].text serious_critical = 0 if tds[9].text == "" else tds[9].text total_cases_1_m_pop = 0 if tds[10].text == "" else tds[10].text total_deaths_1_m_pop = 0 if tds[11].text == "" else tds[11].text total_test = 0 if tds[12].text == "" else tds[12].text total_test_1_m_pop = 0 if tds[13].text == "" else tds[13].text population = 0 if tds[14].text == "" else tds[14].text continent = 0 if tds[15].text == "" else tds[15].text one_case_every_x_ppl = 0 if tds[16].text == "" else tds[16].text one_death_every_x_ppl = 0 if tds[17].text == "" else tds[17].text one_test_every_x_ppl = 0 if tds[18].text == "" else tds[18].text data.update( { "country": country, "total_case": total_case, "new_case": new_case, "total_death": total_death, "new_death": new_death, "total_recovered": total_recovered, "new_recovered":new_recovered, "active_case": active_case, "serious_critical": serious_critical, "total_cases_1_M_pop": total_cases_1_m_pop, "total_deaths_1_m_pop": total_deaths_1_m_pop, "total_test": total_test, "total_test_1_m_pop": total_test_1_m_pop, "population": population, "continent": continent, "one_case_every_x_ppl": one_case_every_x_ppl, "one_death_every_x_ppl": one_death_every_x_ppl, "one_test_every_x_ppl": one_test_every_x_ppl, } ) return data class CoronaVirusData(BaseModel): country: str total_case: str new_case: str total_death: str new_death: str total_recovered: str new_recovered: str active_case: str serious_critical: str total_cases_1_M_pop: str total_deaths_1_m_pop: str total_test: str total_test_1_m_pop: str population: str continent: str one_case_every_x_ppl: str one_death_every_x_ppl: str one_test_every_x_ppl: str @app.get("/", response_model=CoronaVirusData) async def get_country_corona_virus_data(country: str = "Ethiopia"): """Getting corona virus data from any country. Args: country: Tell what country data to get. Default to Ethiopia. 
Example: https://example.com/?country=china """ return get_data(country=country.lower()) @app.get("/total/") async def get_total_corona_virus_cases(): """Getting total corona virus cases.""" respond = session.get("https://www.worldometers.info/coronavirus/") cases, deaths, recovered = respond.html.find(".maincounter-number") total_currently_infected_patients = respond.html.find( ".number-table-main", first=True ).text total_cases_which_had_an_outcome = respond.html.find(".number-table-main")[1].text total_in_mild_condition = respond.html.find(".number-table", first=True).text total_serious_or_critical = respond.html.find(".number-table")[1].text totals_cases = cases.find("span", first=True).text totals_deaths = deaths.find("span", first=True).text totals_recovered = recovered.find("span", first=True).text return { "totals_cases": totals_cases, "totals_deaths": totals_deaths, "totals_recovered": totals_recovered, "total_currently_infected_patients": total_currently_infected_patients, "total_cases_which_had_an_outcome": total_cases_which_had_an_outcome, "total_in_mild_condition": total_in_mild_condition, "total_serious_or_critical": total_serious_or_critical, } async def http400_error_handler(_, exc): return JSONResponse( {"detail": "Country doesn't exist"}, status_code=status.HTTP_400_BAD_REQUEST ) app.add_exception_handler(ValidationError, http400_error_handler)
#! /usr/bin/env python3 from typing import Dict, List, Tuple import graphics import day24 from utils import get_file_lines class Hexagon(graphics.Polygon): def __init__(self, x, y, length): delta_x = (1, 0.5, -0.5, -1, -0.5, 0.5) delta_y = (0, -0.86602540378443864676372317075294, -0.86602540378443864676372317075294, 0, 0.86602540378443864676372317075294, -0.86602540378443864676372317075294) points = [(x, y)] for i in range(5): nx = points[-1][0] + length * delta_x[i] ny = points[-1][1] - length * delta_y[i] points.append((nx, ny)) super().__init__([graphics.Point(i,j) for i,j in points]) class HexagonGrid: def __init__(self, left, top, col_count, row_count, length): self.cells = [] self.filled_cells = set() y_length = length * 1.7320508075688772935274463415059 for x in range(col_count): self.cells.append([]) x_offset = left + 0.5 * length + 1.5 * length * x y_offset = top + (0 if x % 2 == 0 else y_length / 2) for y in range(row_count): hexagon = Hexagon(x_offset, y_offset + y * y_length, length) self.cells[-1].append(hexagon) def draw(self, graphwin): for row in self.cells: for cell in row: cell.draw(graphwin) def reset_cells(self, coords_to_fill): for coord in coords_to_fill: if coord not in self.filled_cells: y, x = int(coord.real), int(coord.imag) self.cells[y][x].setFill('red') for coord in (self.filled_cells - coords_to_fill): y, x = int(coord.real), int(coord.imag) self.cells[y][x].setFill('light grey') self.filled_cells = coords_to_fill def get_grid_size(floors: List[Dict[complex, int]]) -> Tuple[int, int]: minx, miny, maxx, maxy = 0, 0, 0, 0 for floor in floors: for pos in floor.keys(): minx = min(minx, int(pos.real)) miny = min(miny, int(pos.imag)) maxx = max(maxx, int(pos.real)) maxy = max(maxy, int(pos.imag)) return (maxx-minx+3, maxy-miny+2) def part1(floor: Dict[complex, int]) -> int: minx = int(min(pos.real for pos in floor.keys())) miny = int(min(pos.imag for pos in floor.keys())) maxx = int(max(pos.real for pos in floor.keys())) maxy = int(max(pos.imag for pos in floor.keys())) col_count, row_count = get_grid_size([floor]) x_offset = (maxx - minx) // 2 + 1 y_offset = (maxy - miny) // 2 win = graphics.GraphWin('Part 1', 1460, 920) grid = HexagonGrid(5, 5, col_count, row_count, 15) grid.draw(win) for pos, colour in floor.items(): if colour: grid.cells[int(pos.real+x_offset)][int(pos.imag+y_offset)].setFill('red') win.getMouse() def part2(floor: Dict[complex, int]) -> int: floors = [floor] for _ in range(20): floor = day24.next_floor(floor) floors.append(floor) col_count, row_count = get_grid_size(floors) x_offset = col_count // 2 y_offset = row_count // 2 center = complex(x_offset, y_offset) length = 10 row_height = length*1.7320508075688772935274463415059 print('cols',col_count, 'width',2*length*col_count + 10) win = graphics.GraphWin('Part 2', 1.5*length*col_count + 20, row_count*row_height + 20) grid = HexagonGrid(5, 5, col_count, row_count, length) grid.draw(win) for floor in floors: print(win.getMouse()) grid.reset_cells(set([center+pos for pos in floor.keys()])) print(win.getMouse()) return sum(floor.values()) if __name__ == '__main__': raw_data = get_file_lines('input/day24.txt') raw_floor = day24.get_initial_state(raw_data) part1(raw_floor) part2(raw_floor)
# TODO # class MeanAbsoluteError(): # def __init__(self): pass # TODO # class MeanBiasError(): # def __init__(self): pass # TODO # class ClassificationLosses(): # def __init__(self): pass # TODO # class Elbow(): # def __init__(self): pass # TODO # class EuclideanDistance(): # def __init__(self): pass # TODO # class Graussian(): # def __init__(self): pass #################################################################### import numpy as np # for math # Resources # https://ml-cheatsheet.readthedocs.io/en/latest/loss_functions.html def accuracy_score(y_true, y_pred): """ Compare y_true to y_pred and return the accuracy """ accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true) return accuracy class Loss(object): def __call__(self, y_true, y_pred): return NotImplementedError() def gradient(self, y, y_pred): raise NotImplementedError() def acc(self, y, y_pred): return 0 class MeanSquareError(Loss): def __call__(self, y_true, y_pred): return 0.5 * np.power((y_true - y_pred), 2) def gradient(self, y_true, y_pred): return -(y_true - y_pred) class CrossEntropy(): def __call__(self, y_true, y_pred): # Avoid division by zero y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15) return - y_true * np.log(y_pred) - (1 - y_true) * np.log(1 - y_pred) def gradient(self, y_true, y_pred): # Avoid division by zero y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15) return - (y_true / y_pred) + (1 - y_true) / (1 - y_pred) def acc(self, y, p): return accuracy_score(np.argmax(y, axis=1), np.argmax(p, axis=1)) loss_functions = { "MSE" : MeanSquareError, "CrossEntropy" : CrossEntropy } # class CrossEntropy(): # # https://machinelearningmastery.com/cross-entropy-for-machine-learning/ # def __init__(self, epsilon=1e-15): # self.epsilon = epsilon# Close To 0 # def loss(self, yhat, y): # # Avoid division by zero # yhat = np.clip(yhat, self.epsilon, 1. - self.epsilon) # # get losses values # return -y * np.log(yhat) - (1 - y)* np.log(1 - yhat) # def accuracy(self, yhat, y): # return accuracy_score(np.argmax(y, axis=1), np.argmax(yhat, axis=1)) # def derivative(self, yhat, y): # # Avoid devision by zero # yhat = np.clip(yhat, self.epsilon, 1. - self.epsilon) # # get derivative values # return -(y / yhat) + (1 - y) / (1 - yhat) # class CrossEntropy(): # def loss(self, y, p): # # Avoid division by zero # p = np.clip(p, 1e-15, 1 - 1e-15) # return - y * np.log(p) - (1 - y) * np.log(1 - p) # def acc(self, y, p): # return accuracy_score(np.argmax(y, axis=1), np.argmax(p, axis=1)) # def gradient(self, y, p): # # Avoid division by zero # p = np.clip(p, 1e-15, 1 - 1e-15) # return - (y / p) + (1 - y) / (1 - p) # if __name__ == "__main__": # yhat = np.array( # [ # [0.25,0.25,0.25,0.25], # [0.01,0.01,0.01,0.96] # ] # ) # y = np.array( # [ # [0,0,0,1], # [0,0,0,1] # ] # ) # mse = MeanSquareError() # print(mse.loss(yhat, y))
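# Hedged usage sketch for the loss classes defined above; the example arrays are
# illustrative and mirror the values in the commented-out __main__ block.
if __name__ == "__main__":
    y_pred = np.array([[0.25, 0.25, 0.25, 0.25],
                       [0.01, 0.01, 0.01, 0.96]])
    y_true = np.array([[0, 0, 0, 1],
                       [0, 0, 0, 1]])
    mse = MeanSquareError()
    ce = CrossEntropy()
    print("MSE per element:", mse(y_true, y_pred))            # element-wise squared error
    print("Cross-entropy per element:", ce(y_true, y_pred))   # element-wise log loss
    print("Accuracy:", ce.acc(y_true, y_pred))                # argmax match rate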
## @packege zeus_security_py # Helper package for data security that will implement zeus microservices # # from Cryptodome.Cipher import AES from Cryptodome import Random from hashlib import sha256 import base64 import os import json __author__ = "Noé Cruz | [email protected]" __copyright__ = "Copyright 2007, The Cogent Project" __credits__ = ["Noé Cruz", "Zurck'z", "Jesus Salazar"] __license__ = "MIT" __version__ = "0.0.1" __maintainer__ = "Noé Cruz" __email__ = "[email protected]" __status__ = "Dev" ## Class Encryptor # Encryptor class contains AES encrypt/decrypt functions # class AESEncryptor: """ Helper class for data security this contains certain methods for it. AES (Advanced Encryption Standard) is a symmetric block cipher standardized by NIST . It has a fixed data block size of 16 bytes. Its keys can be 128, 192, or 256 bits long. Attributes ---------- default_block_size : int Default block size for aes (default 32) _sk_env : str Key for get secret key from environment Methods ------- __is_valid(sk=None) Check if the secret key of argument is null, if that is null try to get secret key from environment. encrypt """ default_block_size: int = 32 _sk_env = "AES_SK" @staticmethod def __is_valid(sk: str = None): if sk is not None: return sk sk_env: str = os.getenv(AESEncryptor._sk_env) if sk_env is not None: return sk_env raise Exception("AES Secret key was not provided!") @staticmethod def decrypt_ws_response(payload: dict, secret_key=None) -> dict: json_decrypted = AESEncryptor.decrypt(payload["data"], secret_key) return json_decrypted @staticmethod def encrypt_ws_request(payload: dict, secret_key=None) -> dict: encrypted_payload = AESEncryptor.encrypt(json.dumps(payload), secret_key) return {"data": encrypted_payload} @staticmethod def json_decrypt(json_encrypted: str, secret_key=None) -> dict: return json.loads(AESEncryptor.encrypt(json_encrypted, secret_key)) @staticmethod def json_encrypt(json_to_encrypt: dict, secret_key=None) -> str: json_str = json.dumps(json_to_encrypt) return AESEncryptor.encrypt(json_str, secret_key) @staticmethod def json_decrypt(json_encrypted: str, secret_key=None) -> dict: return json.loads(AESEncryptor.encrypt(json_encrypted, secret_key)) @staticmethod def encrypt( value: str, secret_key: str = None, aes_mode=AES.MODE_CBC, charset="utf-8", block_size: int = 16, ) -> str: secret_key = AESEncryptor.__is_valid(secret_key).encode(charset) raw_bytes = AESEncryptor.__pad(value) iv = Random.new().read(block_size) cipher = AES.new(secret_key, aes_mode, iv) return base64.b64encode(iv + cipher.encrypt(raw_bytes)).decode(charset) @staticmethod def decrypt( value: str, secret_key=None, aes_mode=AES.MODE_CBC, charset="utf-8" ) -> str: secret_key = str.encode(AESEncryptor.__is_valid(secret_key)) encrypted = base64.b64decode(value) iv = encrypted[:16] cipher = AES.new(secret_key, aes_mode, iv) return AESEncryptor.__un_pad(cipher.decrypt(encrypted[16:])).decode(charset) @staticmethod def genHash(value: str, charset="utf-8") -> str: return sha256(value.encode(charset)).hexdigest() @staticmethod def __pad(s: str, block_size: int = 16, charset: str = "utf-8") -> bytes: return bytes( s + (block_size - len(s) % block_size) * chr(block_size - len(s) % block_size), charset, ) @staticmethod def __un_pad(value: str) -> str: return value[0 : -ord(value[-1:])]
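# Hedged usage sketch for AESEncryptor above; the 32-character key is a made-up
# example (AES-256 expects a 16-, 24- or 32-byte key), not a key from the source.
if __name__ == "__main__":
    secret = "0123456789abcdef0123456789abcdef"
    token = AESEncryptor.encrypt("sensitive payload", secret_key=secret)
    plain = AESEncryptor.decrypt(token, secret_key=secret)
    assert plain == "sensitive payload"
    print(AESEncryptor.genHash("sensitive payload"))  # SHA-256 digest of the value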
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0

"""Helper function to generate the README table."""

import json
import os
from pathlib import Path

import utils

import composer
from composer import functional as CF

EXCLUDE_METHODS = ['no_op_model', 'utils']

HEADER = ['Name', 'Functional', 'Attribution', 'tl;dr']
ATTRIBUTES = ['class_name', 'functional', 'tldr', 'attribution', 'link']

GITHUB_BASE = 'https://github.com/mosaicml/composer/tree/dev/composer/algorithms/'

folder_path = os.path.join(os.path.dirname(composer.__file__), 'algorithms')

methods = utils.list_dirs(Path(folder_path))
methods = [m for m in methods if m not in EXCLUDE_METHODS]

if not len(methods):
    raise ValueError(f'Found 0 methods in {folder_path}')

print(f'Found {len(methods)} methods with metadata.')

metadata = {}
for name in methods:
    json_path = os.path.join(folder_path, name, 'metadata.json')
    with open(json_path, 'r') as f:
        metadata[name] = json.load(f)[name]

    # test functional method is importable
    method_functional = metadata[name]['functional']
    if method_functional and not hasattr(CF, method_functional):
        raise ImportError(f'Unable to import functional form {method_functional} for {name}')

    metadata[name]['functional'] = f'`cf.{method_functional}`'
    metadata[name]['github_link'] = GITHUB_BASE + name

# define row format
row = [
    '[{class_name}]({github_link})',
    '{functional}',
    lambda d: '[{attribution}]({link})' if d['link'] else '{attribution}',
    '{tldr}',
]

table_md = utils.build_markdown_table(
    header=HEADER,
    metadata=metadata,
    sorted_keys=sorted(metadata.keys()),
    row_format=row,
)

table_path = os.path.join(os.path.dirname(__file__), 'algorithms_table.md')
with open(table_path, 'w') as f:
    f.write(table_md)

print(f'Table written to {table_path}')
# -*- coding: utf-8 -*- """IdentityServicesEngineAPI network_access_time_date_conditions API fixtures and tests. Copyright (c) 2021 Cisco and/or its affiliates. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import pytest from fastjsonschema.exceptions import JsonSchemaException from ciscoisesdk.exceptions import MalformedRequest from ciscoisesdk.exceptions import ciscoisesdkException from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match') def is_valid_get_network_access_time_conditions(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_ab916b19789c59b79dddbc2d0a3c57fc_v3_1_0').validate(obj.response) return True def get_network_access_time_conditions(api): endpoint_result = api.network_access_time_date_conditions.get_network_access_time_conditions( ) return endpoint_result @pytest.mark.network_access_time_date_conditions def test_get_network_access_time_conditions(api, validator): try: assert is_valid_get_network_access_time_conditions( validator, get_network_access_time_conditions(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def get_network_access_time_conditions_default(api): endpoint_result = api.network_access_time_date_conditions.get_network_access_time_conditions( ) return endpoint_result @pytest.mark.network_access_time_date_conditions def test_get_network_access_time_conditions_default(api, validator): try: assert is_valid_get_network_access_time_conditions( validator, get_network_access_time_conditions_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_create_network_access_time_condition(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_784b314d32b258a1b53c5c84cf84d396_v3_1_0').validate(obj.response) return True def create_network_access_time_condition(api): endpoint_result = api.network_access_time_date_conditions.create_network_access_time_condition( active_validation=False, attribute_name='string', attribute_value='string', children=[{'conditionType': 'string', 'isNegate': True, 'link': 
{'href': 'string', 'rel': 'string', 'type': 'string'}}], condition_type='string', dates_range={'endDate': 'string', 'startDate': 'string'}, dates_range_exception={'endDate': 'string', 'startDate': 'string'}, description='string', dictionary_name='string', dictionary_value='string', hours_range={'endTime': 'string', 'startTime': 'string'}, hours_range_exception={'endTime': 'string', 'startTime': 'string'}, id='string', is_negate=True, link={'href': 'string', 'rel': 'string', 'type': 'string'}, name='string', operator='string', payload=None, week_days=['string'], week_days_exception=['string'] ) return endpoint_result @pytest.mark.network_access_time_date_conditions def test_create_network_access_time_condition(api, validator): try: assert is_valid_create_network_access_time_condition( validator, create_network_access_time_condition(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def create_network_access_time_condition_default(api): endpoint_result = api.network_access_time_date_conditions.create_network_access_time_condition( active_validation=False, attribute_name=None, attribute_value=None, children=None, condition_type=None, dates_range=None, dates_range_exception=None, description=None, dictionary_name=None, dictionary_value=None, hours_range=None, hours_range_exception=None, id=None, is_negate=None, link=None, name=None, operator=None, payload=None, week_days=None, week_days_exception=None ) return endpoint_result @pytest.mark.network_access_time_date_conditions def test_create_network_access_time_condition_default(api, validator): try: assert is_valid_create_network_access_time_condition( validator, create_network_access_time_condition_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_get_network_access_time_condition_by_id(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_c941303330bc5615b3eb8d4d2702b874_v3_1_0').validate(obj.response) return True def get_network_access_time_condition_by_id(api): endpoint_result = api.network_access_time_date_conditions.get_network_access_time_condition_by_id( id='string' ) return endpoint_result @pytest.mark.network_access_time_date_conditions def test_get_network_access_time_condition_by_id(api, validator): try: assert is_valid_get_network_access_time_condition_by_id( validator, get_network_access_time_condition_by_id(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def get_network_access_time_condition_by_id_default(api): endpoint_result = api.network_access_time_date_conditions.get_network_access_time_condition_by_id( id='string' ) return endpoint_result @pytest.mark.network_access_time_date_conditions def test_get_network_access_time_condition_by_id_default(api, validator): try: assert is_valid_get_network_access_time_condition_by_id( validator, get_network_access_time_condition_by_id_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_update_network_access_time_condition_by_id(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert 
hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_11232a518d5655f69e8687c9c98740c6_v3_1_0').validate(obj.response) return True def update_network_access_time_condition_by_id(api): endpoint_result = api.network_access_time_date_conditions.update_network_access_time_condition_by_id( active_validation=False, attribute_name='string', attribute_value='string', children=[{'conditionType': 'string', 'isNegate': True, 'link': {'href': 'string', 'rel': 'string', 'type': 'string'}}], condition_type='string', dates_range={'endDate': 'string', 'startDate': 'string'}, dates_range_exception={'endDate': 'string', 'startDate': 'string'}, description='string', dictionary_name='string', dictionary_value='string', hours_range={'endTime': 'string', 'startTime': 'string'}, hours_range_exception={'endTime': 'string', 'startTime': 'string'}, id='string', is_negate=True, link={'href': 'string', 'rel': 'string', 'type': 'string'}, name='string', operator='string', payload=None, week_days=['string'], week_days_exception=['string'] ) return endpoint_result @pytest.mark.network_access_time_date_conditions def test_update_network_access_time_condition_by_id(api, validator): try: assert is_valid_update_network_access_time_condition_by_id( validator, update_network_access_time_condition_by_id(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def update_network_access_time_condition_by_id_default(api): endpoint_result = api.network_access_time_date_conditions.update_network_access_time_condition_by_id( active_validation=False, id='string', attribute_name=None, attribute_value=None, children=None, condition_type=None, dates_range=None, dates_range_exception=None, description=None, dictionary_name=None, dictionary_value=None, hours_range=None, hours_range_exception=None, is_negate=None, link=None, name=None, operator=None, payload=None, week_days=None, week_days_exception=None ) return endpoint_result @pytest.mark.network_access_time_date_conditions def test_update_network_access_time_condition_by_id_default(api, validator): try: assert is_valid_update_network_access_time_condition_by_id( validator, update_network_access_time_condition_by_id_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e def is_valid_delete_network_access_time_condition_by_id(json_schema_validate, obj): if not obj: return False assert hasattr(obj, 'headers') assert hasattr(obj, 'content') assert hasattr(obj, 'text') assert hasattr(obj, 'response') json_schema_validate('jsd_e2a697abfe2058d3adc7ad9922f5a5d6_v3_1_0').validate(obj.response) return True def delete_network_access_time_condition_by_id(api): endpoint_result = api.network_access_time_date_conditions.delete_network_access_time_condition_by_id( id='string' ) return endpoint_result @pytest.mark.network_access_time_date_conditions def test_delete_network_access_time_condition_by_id(api, validator): try: assert is_valid_delete_network_access_time_condition_by_id( validator, delete_network_access_time_condition_by_id(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest)): print("ERROR: {error}".format(error=original_e)) raise original_e def delete_network_access_time_condition_by_id_default(api): endpoint_result = api.network_access_time_date_conditions.delete_network_access_time_condition_by_id( 
id='string' ) return endpoint_result @pytest.mark.network_access_time_date_conditions def test_delete_network_access_time_condition_by_id_default(api, validator): try: assert is_valid_delete_network_access_time_condition_by_id( validator, delete_network_access_time_condition_by_id_default(api) ) except Exception as original_e: with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)): raise original_e
"""Run calcsfh or hybridMC in Parallel (using subprocess)""" import argparse import logging import os import subprocess import sys from glob import glob1 import numpy as np logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) # Could be in a config or environ calcsfh = '$HOME/research/match2.5/bin/calcsfh' zcombine = '$HOME/research/match2.5/bin/zcombine' hybridmc = '$HOME/research/match2.5/bin/hybridMC' def test_files(prefs, run_calcsfh=True): """make sure match input files exist""" return_code = 0 for pref in prefs: if run_calcsfh: pfiles = calcsfh_existing_files(pref) else: pfiles = [hybridmc_existing_files(pref)] test = [os.path.isfile(f) for f in pfiles] if False in test: logger.error('missing a file in {}'.format(pref)) logger.error(pfiles) return_code += 1 if return_code > 0: sys.exit(2) return def uniform_filenames(prefs, dry_run=False): """ make all fake match and par files in a directory follow the format target_filter1_filter2.gst.suffix all lower case use dry_run to print the mv command, or will call os.system. """ from glob import glob1 for pref in prefs: dirname, p = os.path.split(pref) filters = '_'.join(p.split('_')[1:]) print dirname, p, filters fake, = glob1(dirname, '*{}*fake'.format(filters)) match, = glob1(dirname, '*{}*match'.format(filters)) param, = glob1(dirname, '*{}*param'.format(filters)) ufake = '_'.join(fake.split('_')[1:]).replace('_gst.fake1', '.gst').lower() umatch = '_'.join(match.split('_')[1:]).lower() uparam = param.replace('.param', '.gst.param').lower() for old, new in zip([fake, match, param], [ufake, umatch, uparam]): cmd = 'mv {dir}/{old} {dir}/{new}'.format(dir=dirname, old=old, new=new) logger.info(cmd) if not dry_run: os.system(cmd) def calcsfh_existing_files(pref, optfilter1=''): """file formats for param match and matchfake""" param = pref + '.param' match = pref + '.match' fake = pref + '.matchfake' return (param, match, fake) def calcsfh_new_files(pref): """file formats for match grid, sdout, and sfh file""" out = pref + '.out' scrn = pref + '.scrn' sfh = pref + '.sfh' return (out, scrn, sfh) def hybridmc_existing_files(pref): """file formats for the HMC, based off of calcsfh_new_files""" mcin = pref + '.out.dat' return mcin def hybridmc_new_files(pref): """file formats for HybridMC output and the following zcombine output""" pref = pref.strip() mcmc = pref + '.mcmc' mcscrn = mcmc + '.scrn' mczc = mcmc + '.zc' return (mcmc, mcscrn, mczc) def run_parallel(prefs, dry_run=False, nproc=8, run_calcsfh=True): """run calcsfh and zcombine in parallel, flags are hardcoded.""" test_files(prefs, run_calcsfh) rdict = {'calcsfh': calcsfh, 'zcombine': zcombine, 'hybridmc': hybridmc} # calcsfh # calcsfh, param, match, fake, out, scrn cmd1 = ('{calcsfh} {param} {match} {fake} {out} ', '-PARSEC -mcdata -kroupa -zinc -sub=v2 > {scrn}') # zcombine # zcombine, out, sfh cmd2 = '{zcombine} {out} -bestonly > {sfh}' # hybridmc # hybridmc, mcin, mcmc, mcscrn cmd3 = '{hybridmc} {mcin} {mcmc} -tint=2.0 -nmc=10000 -dt=0.015 > {mcscrn}' # zcombine w/ hybrid mc # zcombine, mcmc, mczc cmd4 = '{zcombine} {mcmc} -unweighted -medbest -jeffreys -best={mczc}' niters = np.ceil(len(prefs) / float(nproc)) sets = np.arange(niters * nproc, dtype=int).reshape(niters, nproc) logging.debug('{} prefs, {} niters'.format(len(prefs), niters)) for j, iset in enumerate(sets): # don't use not needed procs iset = iset[iset < len(prefs)] # run calcsfh procs = [] for i in iset: if run_calcsfh: rdict['param'], rdict['match'], rdict['fake'] = \ 
calcsfh_existing_files(prefs[i]) rdict['out'], rdict['scrn'], rdict['sfh'] = \ calcsfh_new_files(prefs[i]) cmd = cmd1.format(**rdict) else: rdict['mcin'] = hybridmc_existing_files(prefs[i]) rdict['mcmc'], rdict['mcscrn'], rdict['mczc'] = \ hybridmc_new_files(prefs[i]) cmd = cmd3.format(**rdict) if not dry_run: procs.append(subprocess.Popen(cmd, shell=True)) logger.info(cmd) # wait for calcsfh if not dry_run: [p.wait() for p in procs] logger.debug('calcsfh or hybridMC set {} complete'.format(j)) # run zcombine procs = [] for i in iset: if run_calcsfh: rdict['out'], rdict['scrn'], rdict['sfh'] = \ calcsfh_new_files(prefs[i]) zcom = cmd2.format(**rdict) else: zcom = cmd4.format(**rdict) if not dry_run: procs.append(subprocess.Popen(zcom, shell=True)) logger.info(zcom) # wait for zcombine if not dry_run: [p.wait() for p in procs] logger.debug('zcombine set {} complete'.format(j)) def main(argv): """parse in put args, setup logger, and call run_parallel""" desc = ('Run calcsfh in parallel. Note: bg cmd, if in use, ', 'need to be in the current folder') parser = argparse.ArgumentParser(description=desc) parser.add_argument('-d', '--dry_run', action='store_true', help='only print commands') parser.add_argument('-v', '--verbose', action='store_true', help='set logging to debug') parser.add_argument('-n', '--nproc', type=int, default=8, help='number of processors') parser.add_argument('-m', '--hmc', action='store_false', help='run hybridMC (must be after a calcsfh run)') parser.add_argument('-f', '--logfile', type=str, default='calcsfh_parallel.log', help='log file name') parser.add_argument('-s', '--simplify', action='store_true', help=('make filename uniform and exit ', '(before calcsfh run)')) parser.add_argument('pref_list', type=argparse.FileType('r'), help=("list of prefixs to run on. E.g.,", "ls */*.match | sed 's/.match//' > pref_list")) args = parser.parse_args(argv) prefs = [l.strip() for l in args.pref_list.readlines()] handler = logging.FileHandler(args.logfile) if args.verbose: handler.setLevel(logging.DEBUG) else: handler.setLevel(logging.INFO) formatter = logging.Formatter(('%(asctime)s - %(name)s - ', '%(levelname)s - %(message)s')) handler.setFormatter(formatter) logger.addHandler(handler) if args.simplify: uniform_filenames(prefs, dry_run=args.dry_run) else: logger.info('running on {}'.format(', '.join([p.strip() for p in prefs]))) run_parallel(prefs, dry_run=args.dry_run, nproc=args.nproc, run_calcsfh=args.hmc) if __name__ == '__main__': main(sys.argv[1:])
python
import os import pickle import sys import time import logging from watchdog.observers import Observer from watchdog.events import LoggingEventHandler, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, \ DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff import logging from watchdog.events import LoggingEventHandler class _EmptySnapshot(DirectorySnapshot): @property def stat_snapshot(self): return dict() @property def paths(self): return set() class PersistantObserver(Observer): def __init__(self, *args, **kwargs): """ Check if watching folders has changed since last observation. If change detected, emit corresponding events at suscribers handlers. At the `Observer.stop`, save states of folders with pickle for the next observation. PARAMETERS ========== save_to : unicode path where save pickle dumping protocol (optionnal): int protocol used for dump current states of watching folders """ self._filename = kwargs.pop('save_to') self._protocol = kwargs.pop('protocol', 0) Observer.__init__(self, *args, **kwargs) def start(self, *args, **kwargs): previous_snapshots = dict() if os.path.exists(self._filename): with open(self._filename, 'rb') as f: previous_snapshots = pickle.load(f) for watcher, handlers in self._handlers.items(): try: path = watcher.path curr_snap = DirectorySnapshot(path) pre_snap = previous_snapshots.get(path, _EmptySnapshot(path)) diff = DirectorySnapshotDiff(pre_snap, curr_snap) for handler in handlers: # Dispatch files modifications for new_path in diff.files_created: handler.dispatch(FileCreatedEvent(new_path)) for del_path in diff.files_deleted: handler.dispatch(FileDeletedEvent(del_path)) for mod_path in diff.files_modified: handler.dispatch(FileModifiedEvent(mod_path)) for src_path, mov_path in diff.files_moved: handler.dispatch(FileMovedEvent(src_path, mov_path)) # Dispatch directories modifications for new_dir in diff.dirs_created: handler.dispatch(DirCreatedEvent(new_dir)) for del_dir in diff.dirs_deleted: handler.dispatch(DirDeletedEvent(del_dir)) for mod_dir in diff.dirs_modified: handler.dispatch(DirModifiedEvent(mod_dir)) for src_path, mov_path in diff.dirs_moved: handler.dispatch(DirMovedEvent(src_path, mov_path)) except PermissionError as e: print(e) Observer.start(self, *args, **kwargs) def stop(self, *args, **kwargs): try: snapshots = {handler.path: DirectorySnapshot(handler.path) for handler in self._handlers.keys()} with open(self._filename, 'wb') as f: pickle.dump(snapshots, f, self._protocol) Observer.stop(self, *args, **kwargs) except PermissionError as e: print(e) def observe_realtime(path=os.path.curdir): logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') event_handler = LoggingEventHandler() observer = Observer() observer.schedule(event_handler, path, recursive=True) observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: observer.stop() observer.join() def observe_over_sessions(path=os.path.curdir): logging.basicConfig(level=logging.DEBUG) event_handler = LoggingEventHandler() observer = PersistantObserver(save_to='C:\\temp\\test.pickle', protocol=-1) observer.schedule(event_handler, path=path, recursive=True) observer.start() # observer.join() observer.stop() try: while True: time.sleep(1) except KeyboardInterrupt: observer.stop() observer.join() def compare_dirs(src_path, dest_path): src_snap = DirectorySnapshot(src_path) dest_path = DirectorySnapshot(dest_path) 
diff = DirectorySnapshotDiff(src_snap, dest_path) print(diff.files_modified) if __name__ == "__main__": path = sys.argv[1] if len(sys.argv) > 1 else '.' # observe_realtime(path) # observe_over_sessions(path) compare_dirs("C:\\New folder\\temp", "C:\\temp")
python
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright © Spyder Project Contributors # # Licensed under the terms of the MIT License # (see spyder/__init__.py for details) # ----------------------------------------------------------------------------- """ Tests for the console plugin. """ # Standard library imports try: from unittest.mock import Mock except ImportError: from mock import Mock # Python 2 # Third party imports from qtpy.QtCore import Qt from qtpy.QtWidgets import QMainWindow import pytest from flaky import flaky # Local imports from spyder.config.manager import CONF from spyder.plugins.console.plugin import Console # ============================================================================= # Fixtures # ============================================================================= @pytest.fixture def console_plugin(qtbot): """Console plugin fixture.""" class MainWindowMock(QMainWindow): def __getattr__(self, attr): return Mock() window = MainWindowMock() console_plugin = Console(parent=window, configuration=CONF) console_plugin.start_interpreter({}) window.setCentralWidget(console_plugin.get_widget()) qtbot.addWidget(window) window.resize(640, 480) window.show() return console_plugin # ============================================================================= # Tests # ============================================================================= @flaky(max_runs=3) def test_run_code(console_plugin, capsys): """Test that the console runs code.""" shell = console_plugin.get_widget().shell # Run a simple code shell.insert_text('2+2', at_end=True) shell._key_enter() # Capture stdout and assert that it's the expected one sys_stream = capsys.readouterr() assert sys_stream.out == u'4\n' @flaky(max_runs=3) def test_completions(console_plugin, qtbot): """Test that completions work as expected.""" shell = console_plugin.get_widget().shell # Get completions qtbot.keyClicks(shell, 'impor') qtbot.keyClick(shell, Qt.Key_Tab) qtbot.keyClick(shell.completion_widget, Qt.Key_Enter) # Assert completion was introduced in the console assert u'import' in shell.toPlainText() if __name__ == "__main__": pytest.main()
python
from discord.ext.alternatives import silent_delete

from bot import Bot

Bot().run()
python
import distutils.command.build
import setuptools.command.egg_info
from setuptools import setup, Extension, find_packages
from Cython.Build import cythonize
import os


def get_build_dir(default):
    return os.environ.get('STFPY_BUILD_DIR', default)


# Override egg command
class EggCommand(setuptools.command.egg_info.egg_info):
    def initialize_options(self):
        setuptools.command.egg_info.egg_info.initialize_options(self)
        self.egg_base = get_build_dir(self.egg_base)


# Override build command
class BuildCommand(distutils.command.build.build):
    def initialize_options(self):
        distutils.command.build.build.initialize_options(self)
        self.build_base = get_build_dir(self.build_base)


setup(
    name="stfpy",
    packages=find_packages(),
    cmdclass={'build': BuildCommand, 'egg_info': EggCommand},
    ext_modules=cythonize(
        Extension('*',
                  sources=["stfpy/*.pyx"],
                  language='c++',
                  # Ensure our link flags come last
                  extra_link_args=os.environ.get('LDFLAGS', '').split(' ')),
        nthreads=4,
        language_level="3")
)
python
from geolocalizador import *

endereco = u'Universidade de Sao Paulo, Instituto de Matematica e Estatastica, Departamento de Ciencia da Computacao. Rua do Matao 1010 Cidade Universitaria 05508090 - Sao Paulo, SP - Brasil Telefone: (11) 30916135 Ramal: 6235 Fax: (11) 30916134 URL da Homepage: http://www.ime.usp.br/~cesar/'.encode('utf8', 'replace')
g = Geolocalizador(endereco)

endereco = u'Universidade de Sao Paulo, Instituto de Matematica e Estatistica. Rua do Matao, 1010 - Cidade Universitaria Butanta 05508-090 - Sao Paulo, SP - Brasil URL da Homepage: http://www.vision.ime.usp.br/~jmena/'
g = Geolocalizador(endereco)

endereco = u'Universidade de Sao Paulo, Instituto de Matematica e Estatistica. Rua do Matao, 1010 - Cidade Universitaria Butanta 0090 - Arequipa, - Peru URL da Homepage: http://www.vision.ime.usp.br/~jmena/'
g = Geolocalizador(endereco)

endereco = u'Universidade de Sao Paulo, Instituto de Matematica e Estatastica, Departamento de Cienci 6235 Fax: (11) 30916134 URL da Homepage: http://www.ime.usp.br/~cesar/'.encode('utf8', 'replace')
g = Geolocalizador(endereco)
python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

from telemetry import test

from measurements import image_decoding


class ImageDecodingToughImageCases(test.Test):
  test = image_decoding.ImageDecoding
  # TODO: Rename this page set to tough_image_cases.json
  page_set = 'page_sets/image_decoding_measurement.json'

  # crbug.com/323015
  enabled = not sys.platform.startswith('linux')
python
from .hook_group import HookGroup


class Event(HookGroup):
    def __init__(self, event=None, hooks=None, config=None):
        self.type = event
        super().__init__(hooks=hooks, config=config)
python
# ------------------------------------------------- # Data Types for Data Science in Python - Handling Dates and Times # 24 set 2020 # VNTBJR # ------------------------------------------------ # # Load packages reticulate::repl_python() # Load data import csv csvfile2 = open("Datasets/cta_summary.csv", mode = 'r') daily_summaries = [] for row in csv.reader(csvfile2): daily_summaries.append(row) quit() csvfile2.close() daily_summaries.pop(0) print(daily_summaries) dates_list = [] riderships = [] for date in daily_summaries: dates_list.append(date[0]) riderships.append(date[4]) quit() datetimes_list0 = [] for date in dates_list: datetimes_list0.append(datetime.strptime(date, '%m/%d/%Y')) quit() daily_summaries2 = list(zip(datetimes_list0, riderships)) print(daily_summaries2) daily_summaries3 = defaultdict(list) dict_inside1 = defaultdict(list) dict_inside2 = defaultdict(list) # Loop over the list daily_summaries for daily_summary in daily_summaries: # Convert the service_date to a datetime object service_datetime = datetime.strptime(daily_summary[0], '%m/%d/%Y') # Add the total rides to the current amount for the month daily_summaries3[service_datetime] = dict_inside1['day_type'] = daily_summary[1] daily_summaries3[service_datetime] = dict_inside2['total_ridership'] = daily_summary[4] quit() # Print monthly_total_rides print(daily_summaries3) review_dates = [] for date in daily_summaries: review_dates.append(datetime.strptime(date[0], '%m/%d/%Y')) quit() review_dates = review_dates[4469:4479] print(review_dates) len(review_dates) ####################################################### # There and Back Again a Date Time Journey------------------------------------- ####################################################### # Strings to DateTimes # Import the datetime object from datetime from datetime import datetime # Iterate over the dates_list datetimes_list = [] for date_str in dates_list: # Convert each date to a datetime object: date_dt datetimes_list.append(datetime.strptime(date_str, '%m/%d/%Y')) quit() # Print each date_dt print(datetimes_list) # Converting to a String # Loop over the first 10 items of the datetimes_list for item in datetimes_list[:10]: # Print out the record as a string in the format of 'MM/DD/YYYY' print(datetime.strftime(item, '%m/%d/%Y')) # Print out the record as an ISO standard string print(datetime.isoformat(item)) quit() ####################################################### # Working with Datetime Components and Current time ----------------------------- ####################################################### # Pieces of Time from datetime import datetime from collections import defaultdict # Create a defaultdict of an integer: monthly_total_rides monthly_total_rides = defaultdict(int) # Loop over the list daily_summaries for daily_summary in daily_summaries: # Convert the service_date to a datetime object service_datetime = datetime.strptime(daily_summary[0], '%m/%d/%Y') # Add the total rides to the current amount for the month monthly_total_rides[service_datetime.month] += int(daily_summary[4]) quit() # Print monthly_total_rides print(monthly_total_rides) # Creating DateTime Objects... 
Now # Import datetime from the datetime module from datetime import datetime # Compute the local datetime: local_dt local_dt = datetime.now() # Print the local datetime print(local_dt) # Compute the UTC datetime: utc_dt utc_dt = datetime.utcnow() # Print the UTC datetime print(utc_dt) # Timezones from pytz import timezone # Create a Timezone object for Chicago chicago_usa_tz = timezone('US/Central') # Create a Timezone object for New York ny_usa_tz = timezone('US/Eastern') # Iterate over the daily_summaries list for orig_dt, ridership in daily_summaries2: # Make the orig_dt timezone "aware" for Chicago chicago_dt = orig_dt.replace(tzinfo = chicago_usa_tz) # Convert chicago_dt to the New York Timezone ny_dt = chicago_dt.astimezone(ny_usa_tz) # Print the chicago_dt, ny_dt, and ridership print('Chicago: %s, NY: %s, Ridership: %s' % (chicago_dt, ny_dt, ridership)) quit() ####################################################### # Time Travel (Adding and Subtracting Time) ---------------------------------- ####################################################### # Finding a time in the future and from the past # object daily_summaries for this exercise is missing... # Import timedelta from the datetime module from datetime import timedelta # Build a timedelta of 30 days: glanceback glanceback = timedelta(days = 30) # Iterate over the review_dates as date for date in review_dates: # Calculate the date 30 days back: prior_period_dt prior_period_dt = date - glanceback # Print the review_date, day_type and total_ridership print('Date: %s, Type: %s, Total Ridership: %s' % (date, daily_summaries[date]['day_type'], daily_summaries[date]['total_ridership'])) # Print the prior_period_dt, day_type and total_ridership print('Date: %s, Type: %s, Total Ridership: %s' % (prior_period_dt, daily_summaries[prior_period_dt]['day_type'], daily_summaries[prior_period_dt]['total_ridership'])) quit() # Finding differences in DateTimes # object date_ranges for this exercise is missing # Iterate over the date_ranges for start_date, end_date in date_ranges: # Print the End and Start Date print(end_date, start_date) # Print the difference between each end and start date print(end_date - start_date) quit() ####################################################### # HELP! Libraries to make it easier -------------------------------------------- ####################################################### # Pendulum library # .parse() convert a string to a pendulum datetime object without the need # of the formating string # .in_timezone() convert a pendulum object to a desired timezone # .now() accepts a timezone you want to get the current time in # .in_XXX() (days, months, years...) 
provide the difference in a chosen metric # .in_words() provides the difference in a nice expressive form # Localizing time with pendulum # Import the pendulum module import pendulum # Create a now datetime for Tokyo: tokyo_dt tokyo_dt = pendulum.now('Asia/Tokyo') # Covert the tokyo_dt to Los Angeles: la_dt la_dt = tokyo_dt.in_timezone('America/Los_Angeles') # Print the ISO 8601 string of la_dt print(la_dt.to_iso8601_string()) # Humanizing Differences with Pendulum # Iterate over date_ranges for start_date, end_date in date_ranges: # Convert the start_date string to a pendulum date: start_dt start_dt = pendulum.parse(start_date, strict = False) # Convert the end_date string to a pendulum date: end_dt end_dt = pendulum.parse(end_date, strict = False) # Print the End and Start Date print(end_dt, start_dt) # Calculate the difference between end_dt and start_dt: diff_period diff_period = end_dt - start_dt # Print the difference in days print(diff_period.in_days()) #######################################################
python
import requests import folium import geocoder import string import os import json from functools import wraps, update_wrapper from datetime import datetime from pathlib import Path from flask_bootstrap import Bootstrap from flask_nav import Nav from flask_nav.elements import * from dominate.tags import img from ediblepickle import checkpoint from flask import Flask, render_template, request, redirect, url_for, send_file, make_response ############################################### # Define navbar with logo # ############################################### logo = img(src='./static/img/logo.png', height="50", width="50", style="margin-top:-15px") #here we define our menu items topbar = Navbar(logo, Link('IXWater','http://ixwater.com'), View('Home', 'main') ) # registers the "top" menubar nav = Nav() nav.register_element('top', topbar) app = Flask(__name__) Bootstrap(app) app.config['TEMPLATES_AUTO_RELOAD'] = True app.vars = {} @app.route('/') def main(): return redirect('/index.html') @app.route('/index.html', methods=['GET']) def index(): if request.method == 'GET': #return render_template('input.html') map_name = f"commercecity_outfalls_8dec2021.html" #have to set map path - used by template map_path = os.path.join(app.root_path, 'static/' + map_name) app.vars['map_path'] = map_path if Path(map_path).exists(): return render_template('display.html') else: return redirect('/maperror.html') pass @app.route('/maps/map.html') def show_map(): map_path = app.vars.get("map_path") map_file = Path(map_path) if map_file.exists(): return send_file(map_path) else: return render_template('error.html', culprit='map file', details="the map file couldn't be loaded") pass @app.route('/error.html') def error(): details = "There was some kind of error." return render_template('error.html', culprit='logic', details=details) @app.route('/apierror.html') def apierror(): details = "There was an error with one of the API calls you attempted." return render_template('error.html', culprit='API', details=details) @app.route('/maperror.html') def geoerror(): details = "Map not found." return render_template('error.html', culprit='the Map', details=details) nav.init_app(app) if __name__ == '__main__': app.debug = True app.run(host='0.0.0.0')
python
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver

from asset_events.models import StatusChangingEvent


@receiver(post_save)
def update_asset_status(sender, instance, **kwargs):
    if not issubclass(sender, StatusChangingEvent):
        return

    sender.post_save(instance)
python
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-06-05 08:38
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('yaksh', '0015_auto_20180601_1215'),
    ]

    operations = [
        migrations.AlterField(
            model_name='question',
            name='type',
            field=models.CharField(choices=[('mcq', 'Single Correct Choice'), ('mcc', 'Multiple Correct Choices'), ('code', 'Code'), ('upload', 'Assignment Upload'), ('integer', 'Answer in Integer'), ('string', 'Answer in String'), ('float', 'Answer in Float'), ('arrange', 'Arrange in Correct Order')], max_length=24),
        ),
        migrations.AlterField(
            model_name='testcase',
            name='type',
            field=models.CharField(choices=[('standardtestcase', 'Standard Testcase'), ('stdiobasedtestcase', 'StdIO Based Testcase'), ('mcqtestcase', 'MCQ Testcase'), ('hooktestcase', 'Hook Testcase'), ('integertestcase', 'Integer Testcase'), ('stringtestcase', 'String Testcase'), ('floattestcase', 'Float Testcase'), ('arrangetestcase', 'Arrange Testcase'), ('easystandardtestcase', 'Easy Standard Testcase')], max_length=24, null=True),
        ),
    ]
python
# Generated by Django 3.0.5 on 2020-12-11 07:03 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('content_api', '0002_auto_20201002_1228'), ] operations = [ migrations.AlterModelOptions( name='category', options={'verbose_name': 'Категория', 'verbose_name_plural': 'Категории'}, ), migrations.AlterModelOptions( name='genre', options={'verbose_name': 'Жанр', 'verbose_name_plural': 'Жанры'}, ), migrations.AlterModelOptions( name='title', options={'verbose_name': 'Произведение', 'verbose_name_plural': 'Произведения'}, ), migrations.AlterField( model_name='category', name='name', field=models.CharField(max_length=30, verbose_name='Название'), ), migrations.AlterField( model_name='category', name='slug', field=models.SlugField(max_length=30, unique=True, verbose_name='url'), ), migrations.AlterField( model_name='genre', name='name', field=models.CharField(max_length=30, verbose_name='Название'), ), migrations.AlterField( model_name='genre', name='slug', field=models.SlugField(max_length=30, unique=True, verbose_name='url'), ), migrations.AlterField( model_name='title', name='category', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='titles', to='content_api.Category', verbose_name='Категория'), ), migrations.AlterField( model_name='title', name='description', field=models.TextField(blank=True, null=True, verbose_name='Описание'), ), migrations.AlterField( model_name='title', name='genre', field=models.ManyToManyField(related_name='titles', to='content_api.Genre', verbose_name='Жанр'), ), migrations.AlterField( model_name='title', name='name', field=models.TextField(verbose_name='Название'), ), migrations.AlterField( model_name='title', name='rating', field=models.IntegerField(blank=True, null=True, verbose_name='Рейтинг'), ), migrations.AlterField( model_name='title', name='year', field=models.PositiveSmallIntegerField(db_index=True, verbose_name='Год'), ), ]
python
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import testscenarios from cliff import command from cliff import commandmanager from cliff.tests import base from cliff.tests import utils load_tests = testscenarios.load_tests_apply_scenarios class TestLookupAndFind(base.TestBase): scenarios = [ ('one-word', {'argv': ['one']}), ('two-words', {'argv': ['two', 'words']}), ('three-words', {'argv': ['three', 'word', 'command']}), ] def test(self): mgr = utils.TestCommandManager(utils.TEST_NAMESPACE) cmd, name, remaining = mgr.find_command(self.argv) self.assertTrue(cmd) self.assertEqual(' '.join(self.argv), name) self.assertFalse(remaining) class TestLookupWithRemainder(base.TestBase): scenarios = [ ('one', {'argv': ['one', '--opt']}), ('two', {'argv': ['two', 'words', '--opt']}), ('three', {'argv': ['three', 'word', 'command', '--opt']}), ] def test(self): mgr = utils.TestCommandManager(utils.TEST_NAMESPACE) cmd, name, remaining = mgr.find_command(self.argv) self.assertTrue(cmd) self.assertEqual(['--opt'], remaining) class TestFindInvalidCommand(base.TestBase): scenarios = [ ('no-such-command', {'argv': ['a', '-b']}), ('no-command-given', {'argv': ['-b']}), ] def test(self): mgr = utils.TestCommandManager(utils.TEST_NAMESPACE) try: mgr.find_command(self.argv) except ValueError as err: # make sure err include 'a' when ['a', '-b'] self.assertIn(self.argv[0], str(err)) self.assertIn('-b', str(err)) else: self.fail('expected a failure') class TestFindUnknownCommand(base.TestBase): def test(self): mgr = utils.TestCommandManager(utils.TEST_NAMESPACE) try: mgr.find_command(['a', 'b']) except ValueError as err: self.assertIn("['a', 'b']", str(err)) else: self.fail('expected a failure') class TestDynamicCommands(base.TestBase): def test_add(self): mgr = utils.TestCommandManager(utils.TEST_NAMESPACE) mock_cmd = mock.Mock() mgr.add_command('mock', mock_cmd) found_cmd, name, args = mgr.find_command(['mock']) self.assertIs(mock_cmd, found_cmd) def test_intersected_commands(self): def foo(arg): pass def foo_bar(): pass mgr = utils.TestCommandManager(utils.TEST_NAMESPACE) mgr.add_command('foo', foo) mgr.add_command('foo bar', foo_bar) self.assertIs(foo_bar, mgr.find_command(['foo', 'bar'])[0]) self.assertIs( foo, mgr.find_command(['foo', 'arg0'])[0], ) class TestLoad(base.TestBase): def test_load_commands(self): testcmd = mock.Mock(name='testcmd') testcmd.name.replace.return_value = 'test' mock_pkg_resources = mock.Mock(return_value=[testcmd]) with mock.patch('pkg_resources.iter_entry_points', mock_pkg_resources) as iter_entry_points: mgr = commandmanager.CommandManager('test') iter_entry_points.assert_called_once_with('test') names = [n for n, v in mgr] self.assertEqual(['test'], names) def test_load_commands_keep_underscores(self): testcmd = mock.Mock() testcmd.name = 'test_cmd' mock_pkg_resources = mock.Mock(return_value=[testcmd]) with mock.patch('pkg_resources.iter_entry_points', mock_pkg_resources) as iter_entry_points: mgr = commandmanager.CommandManager( 'test', convert_underscores=False, ) 
iter_entry_points.assert_called_once_with('test') names = [n for n, v in mgr] self.assertEqual(['test_cmd'], names) def test_load_commands_replace_underscores(self): testcmd = mock.Mock() testcmd.name = 'test_cmd' mock_pkg_resources = mock.Mock(return_value=[testcmd]) with mock.patch('pkg_resources.iter_entry_points', mock_pkg_resources) as iter_entry_points: mgr = commandmanager.CommandManager( 'test', convert_underscores=True, ) iter_entry_points.assert_called_once_with('test') names = [n for n, v in mgr] self.assertEqual(['test cmd'], names) class FauxCommand(command.Command): def take_action(self, parsed_args): return 0 class FauxCommand2(FauxCommand): pass class TestLegacyCommand(base.TestBase): def test_find_legacy(self): mgr = utils.TestCommandManager(None) mgr.add_command('new name', FauxCommand) mgr.add_legacy_command('old name', 'new name') cmd, name, remaining = mgr.find_command(['old', 'name']) self.assertIs(cmd, FauxCommand) self.assertEqual(name, 'old name') def test_legacy_overrides_new(self): mgr = utils.TestCommandManager(None) mgr.add_command('cmd1', FauxCommand) mgr.add_command('cmd2', FauxCommand2) mgr.add_legacy_command('cmd2', 'cmd1') cmd, name, remaining = mgr.find_command(['cmd2']) self.assertIs(cmd, FauxCommand) self.assertEqual(name, 'cmd2') def test_no_legacy(self): mgr = utils.TestCommandManager(None) mgr.add_command('cmd1', FauxCommand) self.assertRaises( ValueError, mgr.find_command, ['cmd2'], ) def test_no_command(self): mgr = utils.TestCommandManager(None) mgr.add_legacy_command('cmd2', 'cmd1') self.assertRaises( ValueError, mgr.find_command, ['cmd2'], )
python
# Copyright (C) 2013 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from lib.cuckoo.common.abstracts import Signature


class NetworkHTTP(Signature):
    name = "network_http"
    description = "Performs some HTTP requests"
    severity = 2
    categories = ["http"]
    authors = ["nex"]
    minimum = "2.0"

    host_safelist = [
        "www.msftncsi.com"
    ]

    def on_complete(self):
        for http in getattr(self, "get_net_http_ex", lambda: [])():
            if http["host"] in self.host_safelist:
                continue

            self.mark_ioc("request", "%s %s://%s%s" % (
                http["method"], http["protocol"], http["host"], http["uri"],
            ))

        return self.has_marks()
python
# -*- coding: utf-8 -*- from calendar import timegm from collections import defaultdict from datetime import datetime from importlib import import_module from os import path as op import re from pkg_resources import DistributionNotFound, iter_entry_points, load_entry_point from pygments import highlight from pygments.formatters import HtmlFormatter from pygments.lexers import get_lexer_by_name from pygments.util import ClassNotFound from peppermynt.containers import Config, Container, Item, Items, Posts, SiteContent, Page from peppermynt.exceptions import ConfigException, ContentException, ParserException, RendererException from peppermynt.fs import File from peppermynt.utils import get_logger, dest_path, Timer, unescape, Url logger = get_logger('peppermynt') class Reader: def __init__(self, src, temp, dest, site, writer): self._writer = writer self._parsers = {} self._extensions = defaultdict(list) self._cache = {} self.src = src self.temp = temp self.dest = dest self.site = site self._find_parsers() def _find_parsers(self): for parser in iter_entry_points('peppermynt.parsers'): name = parser.name try: Parser = parser.load() except DistributionNotFound as e: logger.debug('@@ The %s parser could not be loaded due to a missing requirement: %s.', name, str(e)) continue for extension in Parser.accepts: if 'parsers' in self.site and self.site['parsers'].get(extension.lstrip('.')) == name: self._extensions[extension].insert(0, name) else: self._extensions[extension].append(name) self._parsers[name] = Parser def _get_date(self, mtime, date): if not date: return mtime d = [None, None, None, 0, 0] for i, v in enumerate(date.split('-')): d[i] = v if not d[3]: d[3], d[4] = mtime.strftime('%H %M').split() elif not d[4]: d[4] = '{0:02d}'.format(d[4]) return datetime.strptime('-'.join(d), '%Y-%m-%d-%H-%M') def _get_parser(self, item, parser = None): if not parser: try: parser = self._extensions[item.extension()][0] except KeyError: raise ParserException('No parser found that accepts \'{0}\' files.'.format(item.extension()), 'src: {0}'.format(item)) if parser in self._cache: return self._cache[parser] options = self.site.get(parser, None) if parser in self._parsers: Parser = self._parsers[parser](options) else: try: Parser = import_module('peppermynt.parsers.{0}'.format(parser)).Parser(options) except ImportError: raise ParserException('The {0} parser could not be found.'.format(parser)) self._cache[parser] = Parser return Parser def _parse_filename(self, f): date, text = re.match(r'(?:(\d{4}(?:-\d{2}-\d{2}){1,2})-)?(.+)', f.name).groups() return (text, self._get_date(f.mtime, date)) def _init_container(self, container): for f in container.path: container.add(self._init_item(container.config, f)) container.sort() container.tag() container.archive() return container def _init_item(self, config, f, simple = False): Timer.start() frontmatter, bodymatter = self._parse_item_frontmatter(f) item = Item(f.path) text, date = self._parse_filename(f) item['date'] = date.strftime(self.site['date_format']) item['timestamp'] = timegm(date.utctimetuple()) if simple: item['url'] = Url.from_path(f.root.path.replace(self.src.path, ''), text) else: item['tags'] = [] item['url'] = Url.from_format(config['url'], text, date, frontmatter) item['dest'] = dest_path(self.dest.path, item['url']) item.update(frontmatter) item['raw_content'] = bodymatter return item def parse_item(self, config, item, simple = False): bodymatter = item.pop('raw_content') parser = self._get_parser(item, item.get('parser', config.get('parser', None))) 
content = parser.parse(self._writer.from_string(bodymatter, item)) item['content'] = content if not simple: item['excerpt'] = re.search(r'\A.*?(?:<p>(.+?)</p>)?', content, re.M | re.S).group(1) logger.debug('.. (%.3fs) %s', Timer.stop(), str(item).replace(self.src.path, '')) return item def _parse_item_frontmatter(self, f): try: frontmatter, bodymatter = re.search(r'\A---\s+^(.+?)$\s+---\s*(.*)\Z', f.content, re.M | re.S).groups() frontmatter = Config(frontmatter) except AttributeError: raise ContentException('Invalid frontmatter.', 'src: {0}'.format(f.path), 'frontmatter must not be empty') except ConfigException: raise ConfigException('Invalid frontmatter.', 'src: {0}'.format(f.path), 'fontmatter contains invalid YAML') if 'layout' not in frontmatter: raise ContentException('Invalid frontmatter.', 'src: {0}'.format(f.path), 'layout must be set') frontmatter.pop('url', None) return frontmatter, bodymatter def init_parse(self): posts = self._init_container(Posts(self.src, self.site)) containers = {} miscellany = Container('miscellany', self.src, None) pages = posts.pages feeds = [] for name, config in self.site['containers'].items(): container = self._init_container(Items(name, self.src, config)) containers[name] = container pages.extend(container.pages) for f in miscellany.path: if f.extension in self._extensions: miscellany.add(self._init_item(miscellany.config, f, True)) elif f.extension == '.xml': # Assume for now that the only xml files are feeds feeds.append(Page(f.path.replace(self.src.path, ''), None, None)) elif f.extension in ('.html', '.htm'): pages.append(Page(f.path.replace(self.src.path, ''), None, None)) pages.extend(miscellany.pages) return SiteContent(posts, containers, pages, feeds) class Writer: def __init__(self, src, temp, dest, site): self.src = src self.temp = temp self.dest = dest self.site = site self._renderer = self._get_renderer() def _get_renderer(self): renderer = self.site['renderer'] options = self.site.get(renderer, None) try: Renderer = load_entry_point('peppermynt', 'peppermynt.renderers', renderer) except DistributionNotFound as e: raise RendererException('The {0} renderer requires {1}.'.format(renderer, str(e))) except ImportError: try: Renderer = import_module('peppermynt.renderers.{0}'.format(renderer)).Renderer except ImportError: raise RendererException('The {0} renderer could not be found.'.format(renderer)) return Renderer(self.src.path, options) def _highlight(self, match): language, code = match.groups() formatter = HtmlFormatter(linenos = 'table') code = unescape(code) try: code = highlight(code, get_lexer_by_name(language), formatter) except ClassNotFound: code = highlight(code, get_lexer_by_name('text'), formatter) return '<div class="code"><div>{0}</div></div>'.format(code) def _pygmentize(self, html): return re.sub(r'<pre><code[^>]+data-lang="([^>]+)"[^>]*>(.+?)</code></pre>', self._highlight, html, flags = re.S) def from_string(self, string, data = None): return self._renderer.from_string(string, data) def register(self, data): self._renderer.register(data) def render_path(self, template, _data = None, url = None): return dest_path(self.dest.path, url or template) def render(self, template, data = None, url = None): path = self.render_path(template, data, url) try: Timer.start() content = self._renderer.render(template, data) if self.site['pygmentize']: content = self._pygmentize(content) logger.debug('.. 
(%.3fs) %s', Timer.stop(), path.replace(self.dest.path, '')) except RendererException as e: raise RendererException( e.message, '{0} in container item {1}'.format(template, data.get('item', url or template)) ) return File(path, content)
python
import asyncio

from xwing.socket.server import Server

BACKEND_ADDRESS = '/var/tmp/xwing.socket'


async def start_server(loop):
    server = Server(loop, BACKEND_ADDRESS, 'server0')
    await server.listen()
    conn = await server.accept()

    while True:
        data = await conn.recv()
        if not data:
            break

        await conn.send(data)

    conn.close()


loop = asyncio.get_event_loop()
loop.run_until_complete(start_server(loop))
loop.close()
python
import sys
import json

if len(sys.argv) < 2:
    print('uso: python tag_input.py <arquivo>')
    exit(-1)

arquivo_entrada = open(sys.argv[1], 'r', encoding='utf8')
fluxo = json.load(arquivo_entrada)
arquivo_entrada.close()

for bloco in fluxo:
    for action_moment in ['$enteringCustomActions', '$leavingCustomActions']:
        for i, acao in enumerate(fluxo[bloco][action_moment]):
            try:
                acao['type']
            except:
                print(json.dumps(acao, indent=4))
                continue
            if acao['type'] == 'ProcessHttp' and acao['settings']['uri'] == '{{config.api}}/blip/tracking':
                body = json.loads(acao['settings']['body'])
                for track in body:
                    fluxo[bloco][action_moment].append(
                        {
                            'type': 'TrackEvent',
                            '$title': acao['$title'],
                            '$invalid': False,
                            'settings': {
                                'category': track['category'],
                                'action': track['action'],
                                'extras': track['extras']
                            }
                        }
                    )
                fluxo[bloco][action_moment].pop(i)

nome_saida = '%s MIGRATED.json' % (sys.argv[1].split('.')[0])
arquivo_saida = open(nome_saida, 'w', encoding='utf8')
arquivo_saida.write(json.dumps(fluxo))
arquivo_saida.close()

print('Feito! Salvo no arquivo %s' % nome_saida)
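# Added illustration (not part of the original script): a minimal input flow in
# the shape this migration expects -- block ids mapping to action lists, where a
# tracking call is a ProcessHttp action whose settings.body is a JSON list of
# tracks. The field names come from the reads above; the values are invented.
exemplo_fluxo = {
    "bloco-1": {
        "$enteringCustomActions": [
            {
                "type": "ProcessHttp",
                "$title": "tracking",
                "settings": {
                    "uri": "{{config.api}}/blip/tracking",
                    "body": "[{\"category\": \"menu\", \"action\": \"abrir\", \"extras\": {}}]"
                }
            }
        ],
        "$leavingCustomActions": []
    }
}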
python
# coding=utf-8
from setuptools import setup, find_packages

setup(
    name="wsgi-listenme",
    description="WSGI middleware for capture and browse requests and responses",
    version='1.0',
    author='Mario César Señoranis Ayala',
    author_email='[email protected]',
    url='https://github.com/humanzilla/wsgi-listenme',
    packages=find_packages('wsgi_listenme'),
    license="MIT license",
    install_requires=[''],
    tests_require=["tox"],
    zip_safe=False,
    include_package_data=True
)
python
name=("Rayne","Coder","Progammer","Enginner","VScode") (man,*item,software)=name print(man) #*item container for all value that not contain by man and software print(item) print(software)
python
import unittest
from unittest import mock

from .. import surface


class TestEllipsoidDem(unittest.TestCase):

    def test_height(self):
        test_dem = surface.EllipsoidDem(3396190, 3376200)
        self.assertEqual(test_dem.get_height(0, 0), 0)
        self.assertEqual(test_dem.get_height(0, 180), 0)
        self.assertEqual(test_dem.get_height(90, 100), 0)

    def test_radius(self):
        test_dem = surface.EllipsoidDem(3396190, 3376200)
        self.assertEqual(test_dem.get_radius(0, 0), 3396190)
        self.assertEqual(test_dem.get_radius(0, 180), 3396190)
        self.assertEqual(test_dem.get_radius(90, 300), 3376200)

    def tearDown(self):
        pass


class TestGdalDem(unittest.TestCase):

    def test_height(self):
        with mock.patch('autocnet.spatial.surface.GeoDataset') as mockDataset:
            mockInstance = mockDataset.return_value
            mockInstance.latlon_to_pixel.return_value = (1, 2)
            mockInstance.read_array.return_value = [[100]]

            test_dem = surface.GdalDem('TestDem.cub', 3396190, 3376200)
            self.assertEqual(test_dem.get_height(0, 0), 100)
            self.assertEqual(test_dem.get_height(0, 180), 100)
            self.assertEqual(test_dem.get_height(90, 300), 100)

    def test_height_from_radius(self):
        with mock.patch('autocnet.spatial.surface.GeoDataset') as mockDataset:
            mockInstance = mockDataset.return_value
            mockInstance.latlon_to_pixel.return_value = (1, 2)
            mockInstance.read_array.return_value = [[3396190]]

            test_dem = surface.GdalDem('TestDem.cub', 3396190, 3376200, 'radius')
            self.assertEqual(test_dem.get_height(0, 0), 0)
            self.assertEqual(test_dem.get_height(0, 180), 0)
            self.assertEqual(test_dem.get_height(90, 300), 19990)

    def test_radius(self):
        with mock.patch('autocnet.spatial.surface.GeoDataset') as mockDataset:
            mockInstance = mockDataset.return_value
            mockInstance.latlon_to_pixel.return_value = (1, 2)
            mockInstance.read_array.return_value = [[3396190]]

            test_dem = surface.GdalDem('TestDem.cub', 3396190, 3376200, 'radius')
            self.assertEqual(test_dem.get_radius(0, 0), 3396190)
            self.assertEqual(test_dem.get_radius(0, 180), 3396190)
            self.assertEqual(test_dem.get_radius(90, 300), 3396190)

    def test_radius_from_height(self):
        with mock.patch('autocnet.spatial.surface.GeoDataset') as mockDataset:
            mockInstance = mockDataset.return_value
            mockInstance.latlon_to_pixel.return_value = (1, 2)
            mockInstance.read_array.return_value = [[100]]

            test_dem = surface.GdalDem('TestDem.cub', 3396190, 3376200)
            self.assertEqual(test_dem.get_radius(0, 0), 3396290)
            self.assertEqual(test_dem.get_radius(0, 180), 3396290)
            self.assertEqual(test_dem.get_radius(90, 300), 3376300)

    def tearDown(self):
        pass
python
# == Scraping with regular expressions ==
import re
from html import unescape

# Open the HTML file downloaded under the project and store the response body in a variable.
with open('../sample.scraping-book.com/dp.html') as f:
    html = f.read()

# Use findall to pull out the HTML for one book at a time
# re.DOTALL => matches any character, including newlines
for partial_html in re.findall(r'<a itemprop="url".*?</ul>\s*</a></li>', html, re.DOTALL):
    # The book URL is taken from the href attribute of the a element
    # Passing 0 to .group() returns the text matched by the whole pattern,
    # while passing 1 returns the text matched by the part wrapped in ()
    url = re.search(r'<a itemprop="url" href="(.*?)">', partial_html).group(1)
    url = 'http://sample.scraping-book.com' + url
    title = re.search(r'<p itemprop="name".*?</p>', partial_html).group(0)
    # Clean up the value
    # Note that re.sub takes the pattern as a regular expression.
    title = title.replace('<br/>', ' ')
    title = re.sub(r'<.*?>', '', title)
    title = unescape(title)
    print(url, title)

print(1)
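# Added standalone illustration (not part of the original scraper) of the
# group(0) vs. group(1) distinction described above; it reuses the `re` module
# imported earlier, and the markup string is invented.
m = re.search(r'<a itemprop="url" href="(.*?)">', '<a itemprop="url" href="/dp/123">')
print(m.group(0))  # whole match: <a itemprop="url" href="/dp/123">
print(m.group(1))  # captured group only: /dp/123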
python
""" 735. Asteroid Collision Medium We are given an array asteroids of integers representing asteroids in a row. For each asteroid, the absolute value represents its size, and the sign represents its direction (positive meaning right, negative meaning left). Each asteroid moves at the same speed. Find out the state of the asteroids after all collisions. If two asteroids meet, the smaller one will explode. If both are the same size, both will explode. Two asteroids moving in the same direction will never meet. Example 1: Input: asteroids = [5,10,-5] Output: [5,10] Explanation: The 10 and -5 collide resulting in 10. The 5 and 10 never collide. Example 2: Input: asteroids = [8,-8] Output: [] Explanation: The 8 and -8 collide exploding each other. Example 3: Input: asteroids = [10,2,-5] Output: [10] Explanation: The 2 and -5 collide resulting in -5. The 10 and -5 collide resulting in 10. Constraints: 2 <= asteroids.length <= 104 -1000 <= asteroids[i] <= 1000 asteroids[i] != 0 """ # V0 # IDEA : STACK class Solution(object): def asteroidCollision(self, asteroids): stack = [] for item in asteroids: while stack and item < 0 and stack[-1] >= 0: pre = stack.pop() if item == -pre: item = None break elif -item < pre: item = pre if item != None: stack.append(item) return stack # V0 # IDEA : STACK class Solution(object): def asteroidCollision(self, asteroids): ans = [] for new in asteroids: while ans and new < 0 < ans[-1]: if ans[-1] < -new: ans.pop() continue elif ans[-1] == -new: ans.pop() break else: ans.append(new) return ans # V1 # IDEA : STACK # https://leetcode.com/problems/asteroid-collision/solution/ class Solution(object): def asteroidCollision(self, asteroids): ans = [] for new in asteroids: while ans and new < 0 < ans[-1]: if ans[-1] < -new: ans.pop() continue elif ans[-1] == -new: ans.pop() break else: ans.append(new) return ans # V1 # https://blog.csdn.net/fuxuemingzhu/article/details/81079015 class Solution(object): def asteroidCollision(self, asteroids): stack = [] for ast in asteroids: while stack and ast < 0 and stack[-1] >= 0: pre = stack.pop() if ast == -pre: ast = None break elif -ast < pre: ast = pre if ast != None: stack.append(ast) return stack # V2 # Time: O(n) # Space: O(n) class Solution(object): def asteroidCollision(self, asteroids): """ :type asteroids: List[int] :rtype: List[int] """ result = [] for asteroid in asteroids: while result and asteroid < 0 < result[-1]: if result[-1] < -asteroid: result.pop() continue elif result[-1] == -asteroid: result.pop() break else: result.append(asteroid) return result
python
import torch import time class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) class ProgressMeter(object): def __init__(self, num_batches, meters, prefix=""): self.batch_fmtstr = self._get_batch_fmtstr(num_batches) self.meters = meters self.prefix = prefix def display(self, batch): entries = [self.prefix + self.batch_fmtstr.format(batch)] entries += [str(meter) for meter in self.meters] print('\t'.join(entries)) def _get_batch_fmtstr(self, num_batches): num_digits = len(str(num_batches // 1)) fmt = '{:' + str(num_digits) + 'd}' return '[' + fmt + '/' + fmt.format(num_batches) + ']' def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res @torch.no_grad() def evaluate(val_loader, model, device=None, print_freq=100): if device is None: device = next(model.parameters()).device else: model.to(device) batch_time = AverageMeter('Time', ':6.3f') top1 = AverageMeter('Acc@1', ':6.2f') top5 = AverageMeter('Acc@5', ':6.2f') progress = ProgressMeter( len(val_loader), [batch_time, top1, top5], prefix='Test: ') # switch to evaluate mode model.eval() end = time.time() for i, (images, target) in enumerate(val_loader): images = images.to(device) target = target.to(device) # compute output output = model(images) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) top1.update(acc1[0], images.size(0)) top5.update(acc5[0], images.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % print_freq == 0: progress.display(i) print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5)) return top1.avg
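# A tiny usage sketch (added, not in the original module) showing what the
# `accuracy` helper above returns on a hand-made batch; the logits and labels
# are invented values chosen so that two of the three predictions are correct.
if __name__ == "__main__":
    logits = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
    labels = torch.tensor([1, 0, 0])
    top1, = accuracy(logits, labels, topk=(1,))
    print(top1)  # tensor([66.6667])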
python
from time import time from json import dumps, loads from redis import StrictRedis, ConnectionPool, WatchError from PyYADL.distributed_lock import AbstractDistributedLock class RedisLock(AbstractDistributedLock): def __init__(self, name, prefix=None, ttl=-1, existing_connection_pool=None, redis_host='localhost', redis_port=6379, redis_password=None, redis_db=0, **kwargs): super().__init__(name, prefix, ttl) client_connection = existing_connection_pool or ConnectionPool(host=redis_host, port=redis_port, password=redis_password, db=redis_db, **kwargs) self._client = StrictRedis(connection_pool=client_connection) self.LOCK_KEY = self._build_lock_key() def _build_lock_key(self): key = '' if self.prefix: key = key + self.prefix + ':' key = key + 'lock:' + self.name return key def _write_lock_if_not_exists(self): value = dumps({'timestamp': int(time()), 'secret': self._secret, 'exclusive': True}) ttl = self.ttl if self.ttl > 0 else None result = self._client.set(name=self.LOCK_KEY, value=value, ex=ttl, nx=True) return bool(result) def _verify_secret(self) -> bool: result = self._client.get(self.LOCK_KEY) secret = loads(result.decode('utf-8')).get('secret') if result is not None else None if secret is None: raise RuntimeError('release unlocked lock') return secret == self._secret def _delete_lock(self): return bool(self._client.delete(self.LOCK_KEY)) class RedisWriteLock(RedisLock): pass class RedisReadLock(RedisLock): def _write_lock_if_not_exists(self): with self._client.pipeline() as pipe: try: pipe.watch(self.LOCK_KEY) raw_lock_data = pipe.get(self.LOCK_KEY) lock_data = loads(raw_lock_data.decode('utf-8')) if raw_lock_data else self._generate_new_lock_data() if not self._is_valid_read_lock_data(lock_data): return False lock_data['secret'] = list(set(lock_data['secret'] + [self._secret])) lock_data['timestamp'] = int(time()) ttl = self.ttl if self.ttl > 0 else None pipe.multi() pipe.set(self.LOCK_KEY, value=dumps(lock_data), ex=ttl) pipe.execute() return True except WatchError: self.logger.info('Key %s has changed during transaction. Trying to retry', self.LOCK_KEY) return self._write_lock_if_not_exists() @staticmethod def _is_valid_read_lock_data(lock_data): return (lock_data.get('exclusive', True) is False) and (isinstance(lock_data.get('secret'), (list, set, tuple))) def _generate_new_lock_data(self): return {'timestamp': int(time()), 'secret': [self._secret], 'exclusive': False} def _verify_secret(self) -> bool: with self._client.pipeline() as pipe: try: pipe.watch(self.LOCK_KEY) raw_lock_data = pipe.get(self.LOCK_KEY) if raw_lock_data is None: return False lock_data = loads(raw_lock_data.decode('utf-8')) if not self._is_valid_read_lock_data(lock_data): return False return self._secret in lock_data['secret'] except WatchError: self.logger.info('Key %s has changed during transaction. 
Trying to retry', self.LOCK_KEY) return self._verify_secret() def _delete_lock(self): with self._client.pipeline() as pipe: try: pipe.watch(self.LOCK_KEY) raw_lock_data = pipe.get(self.LOCK_KEY) if raw_lock_data is None: return False lock_data = loads(raw_lock_data.decode('utf-8')) if not self._is_valid_read_lock_data(lock_data): return False if self._secret not in lock_data['secret']: return False secrets = lock_data['secret'] secrets.remove(self._secret) ttl = pipe.ttl(self.LOCK_KEY) if not secrets: pipe.multi() pipe.delete(self.LOCK_KEY) pipe.execute() return True else: lock_data['secret'] = secrets pipe.multi() pipe.set(self.LOCK_KEY, value=dumps(lock_data), ex=ttl) pipe.execute() return True except WatchError: self.logger.info('Key %s has changed during transaction. Trying to retry', self.LOCK_KEY) return self._delete_lock()
python
from .elbo import ELBO

__all__ = [
    'ELBO'
]
python