max_stars_repo_path
stringlengths
3
269
max_stars_repo_name
stringlengths
4
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.05M
score
float64
0.23
5.13
int_score
int64
0
5
response_model/python/population_subunits/coarse/analysis/few_cells_tf_analyse_all.py
googlearchive/rgc-models
1
11500
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== '''Analysis file.''' import sys import os.path import tensorflow as tf from absl import app from absl import flags from absl import gfile import cPickle as pickle import matplotlib matplotlib.use('TkAgg') from matplotlib import pylab import matplotlib.pyplot as plt import numpy as np, h5py import scipy.io as sio from scipy import ndimage import random import re # regular expression matching FLAGS = flags.FLAGS flags.DEFINE_string('folder_name', 'experiment4', 'folder where to store all the data') flags.DEFINE_string('save_location', '/home/bhaishahster/', 'where to store logs and outputs?'); flags.DEFINE_string('data_location', '/home/bhaishahster/data_breakdown/', 'where to take data from?') flags.DEFINE_integer('n_b_in_c', 10, 'number of batches in one chunk of data') flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed') flags.DEFINE_integer('randseed', 65, 'python RNG seed') flags.DEFINE_integer('ratio_SU', 2, 'ratio of subunits/cells') flags.DEFINE_string('model_id', 'poisson', 'which model to fit') FLAGS = flags.FLAGS def main(argv): print('\nCode started') np.random.seed(FLAGS.np_randseed) random.seed(FLAGS.randseed) ## Load data summary filename = FLAGS.data_location + 'data_details.mat' summary_file = gfile.Open(filename, 'r') data_summary = sio.loadmat(summary_file) cells = np.squeeze(data_summary['cells']) if FLAGS.model_id == 'poisson' or FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge': cells_choose = (cells ==3287) | (cells ==3318 ) | (cells ==3155) | (cells ==3066) if FLAGS.model_id == 'poisson_full': cells_choose = np.array(np.ones(np.shape(cells)), dtype='bool') n_cells = np.sum(cells_choose) tot_spks = np.squeeze(data_summary['tot_spks']) total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T tot_spks_chosen_cells = tot_spks[cells_choose] chosen_mask = np.array(np.sum(total_mask[cells_choose,:],0)>0, dtype='bool') print(np.shape(chosen_mask)) print(np.sum(chosen_mask)) stim_dim = np.sum(chosen_mask) print('\ndataset summary loaded') # use stim_dim, chosen_mask, cells_choose, tot_spks_chosen_cells, n_cells # decide the number of subunits to fit n_su = FLAGS.ratio_SU*n_cells #batchsz = [100, 500, 1000, 100, 500, 1000, 100, 500, 1000, 1000, 1000, 5000, 10000, 5000, 10000] #n_b_in_c = [10, 2, 1, 10, 2, 1, 10, 2, 1, 1, 1, 1, 1, 1, 1 ] #step_sz = [0.0001, 0.0001, 0.0001, 0.01, 0.01, 0.01 , 1, 1, 1, 10, 100, 10, 10, 1, 1 ] batchsz = [100, 500, 1000, 5000, 1000, 100, 500, 1000, 5000, 10000, 100, 500, 1000, 5000, 10000, 100, 500, 1000, 5000, 10000] n_b_in_c = [10, 2, 1, 1, 1, 10, 2, 1, 1, 1, 10, 2, 1, 1, 1, 10, 2, 1, 1, 1 ] step_sz = [0.1, 0.1, 0.1, 0.1, 0.1, 1 , 1, 1, 1, 1, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10 ] with tf.Session() as sess: # Learn population model! 
stim = tf.placeholder(tf.float32, shape=[None, stim_dim], name='stim') resp = tf.placeholder(tf.float32, name='resp') data_len = tf.placeholder(tf.float32, name='data_len') # get filename if FLAGS.model_id == 'poisson' or FLAGS.model_id == 'poisson_full': w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su), dtype='float32')) a = tf.Variable(np.array(0.1 * np.random.rand(n_cells, 1, n_su), dtype='float32')) if FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge': w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su), dtype='float32')) a = tf.Variable(np.array(0.01 * np.random.rand(n_su, n_cells), dtype='float32')) b_init = np.random.randn(n_cells) #np.log((np.sum(response,0))/(response.shape[0]-np.sum(response,0))) b = tf.Variable(b_init,dtype='float32') plt.figure() for icnt, ibatchsz in enumerate(batchsz): in_b_in_c = n_b_in_c[icnt] istep_sz = np.array(step_sz[icnt],dtype='double') print(icnt) if FLAGS.model_id == 'poisson': short_filename = ('data_model=ASM_pop_batch_sz='+ str(ibatchsz) + '_n_b_in_c' + str(in_b_in_c) + '_step_sz'+ str(istep_sz)+'_bg') else: short_filename = ('data_model='+ str(FLAGS.model_id) +'_batch_sz='+ str(ibatchsz) + '_n_b_in_c' + str(in_b_in_c) + '_step_sz'+ str(istep_sz)+'_bg') parent_folder = FLAGS.save_location + FLAGS.folder_name + '/' save_location = parent_folder +short_filename + '/' print(gfile.IsDirectory(save_location)) print(save_location) save_filename = save_location + short_filename #determine filelist file_list = gfile.ListDirectory(save_location) save_filename = save_location + short_filename print('\nLoading: ', save_filename) bin_files = [] meta_files = [] for file_n in file_list: if re.search(short_filename + '.', file_n): if re.search('.meta', file_n): meta_files += [file_n] else: bin_files += [file_n] #print(bin_files) print(len(meta_files), len(bin_files), len(file_list)) # get latest iteration iterations = np.array([]) for file_name in bin_files: try: iterations = np.append(iterations, int(file_name.split('/')[-1].split('-')[-1])) except: print('Could not load filename: ' + file_name) iterations.sort() print(iterations) iter_plot = iterations[-1] print(int(iter_plot)) # load tensorflow variables saver_var = tf.train.Saver(tf.all_variables()) restore_file = save_filename + '-' + str(int(iter_plot)) saver_var.restore(sess, restore_file) a_eval = a.eval() print(np.exp(np.squeeze(a_eval))) #print(np.shape(a_eval)) # get 2D region to plot mask2D = np.reshape(chosen_mask, [40, 80]) nz_idx = np.nonzero(mask2D) np.shape(nz_idx) print(nz_idx) ylim = np.array([np.min(nz_idx[0])-1, np.max(nz_idx[0])+1]) xlim = np.array([np.min(nz_idx[1])-1, np.max(nz_idx[1])+1]) w_eval = w.eval() #plt.figure() n_su = w_eval.shape[1] for isu in np.arange(n_su): xx = np.zeros((3200)) xx[chosen_mask] = w_eval[:, isu] fig = plt.subplot(20, n_su, n_su * icnt + isu+1) plt.imshow(np.reshape(xx, [40, 80]), interpolation='nearest', cmap='gray') plt.ylim(ylim) plt.xlim(xlim) fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) #if FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge': # plt.title(str(a_eval[isu, :])) #else: # plt.title(str(np.squeeze(np.exp(a_eval[:, 0, isu]))), fontsize=12) if isu == 4: plt.title('Iteration:' + str(int(iter_plot)) + ' batchSz:' + str(ibatchsz) + ' step size:' + str(istep_sz), fontsize=18) plt.show() plt.draw() if __name__ == '__main__': app.run()
1.890625
2
wagtail/admin/views/pages/unpublish.py
brownaa/wagtail
8,851
11501
<reponame>brownaa/wagtail<gh_stars>1000+ from django.core.exceptions import PermissionDenied from django.shortcuts import get_object_or_404, redirect from django.template.response import TemplateResponse from django.urls import reverse from django.utils.translation import gettext as _ from wagtail.admin import messages from wagtail.admin.views.pages.utils import get_valid_next_url_from_request from wagtail.core import hooks from wagtail.core.models import Page, UserPagePermissionsProxy def unpublish(request, page_id): page = get_object_or_404(Page, id=page_id).specific user_perms = UserPagePermissionsProxy(request.user) if not user_perms.for_page(page).can_unpublish(): raise PermissionDenied next_url = get_valid_next_url_from_request(request) if request.method == 'POST': include_descendants = request.POST.get("include_descendants", False) for fn in hooks.get_hooks('before_unpublish_page'): result = fn(request, page) if hasattr(result, 'status_code'): return result page.unpublish(user=request.user) if include_descendants: for live_descendant_page in page.get_descendants().live().defer_streamfields().specific(): if user_perms.for_page(live_descendant_page).can_unpublish(): live_descendant_page.unpublish() for fn in hooks.get_hooks('after_unpublish_page'): result = fn(request, page) if hasattr(result, 'status_code'): return result messages.success(request, _("Page '{0}' unpublished.").format(page.get_admin_display_title()), buttons=[ messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit')) ]) if next_url: return redirect(next_url) return redirect('wagtailadmin_explore', page.get_parent().id) return TemplateResponse(request, 'wagtailadmin/pages/confirm_unpublish.html', { 'page': page, 'next': next_url, 'live_descendant_count': page.get_descendants().live().count(), })
1.945313
2
bcipy/display/rsvp/display.py
mberkanbicer/BciPy
32
11502
import logging import os.path as path from typing import List, Optional, Tuple from psychopy import core, visual from bcipy.acquisition.marker_writer import NullMarkerWriter, MarkerWriter from bcipy.helpers.task import SPACE_CHAR from bcipy.helpers.stimuli import resize_image from bcipy.helpers.system_utils import get_screen_resolution from bcipy.helpers.triggers import TriggerCallback, _calibration_trigger class RSVPDisplay(object): """RSVP Display Object for inquiry Presentation. Animates a inquiry in RSVP. Mode should be determined outside. """ def __init__( self, window: visual.Window, static_clock, experiment_clock: core.Clock, marker_writer: Optional[MarkerWriter] = None, task_color: List[str] = ['white'], task_font: str = 'Times', task_pos: Tuple[float, float] = (-.8, .9), task_height: float = 0.2, task_text: str = '1/100', info_color: List[str] = ['white'], info_text: List[str] = ['Information Text'], info_font: List[str] = ['Times'], info_pos=[(.8, .9)], info_height=[0.2], stim_font='Times', stim_pos=(-.8, .9), stim_height=0.2, stim_inquiry: List[str] = ['a'] * 10, stim_colors: List[str] = ['white'] * 10, stim_timing: List[float] = [1] * 10, is_txt_stim: bool = True, static_time: float = .05, trigger_type: str = 'image', space_char: SPACE_CHAR = SPACE_CHAR): """Initialize RSVP window parameters and objects. PARAMETERS: ---------- # Experiment window(visual.Window): PsychoPy Window static_clock(TODO): no idea experiment_clock(core.Clock): Clock used to timestamp experiment marker_writer(MarkerWriter): object used to write triggers to the daq stream. # Task task_color(list[string]): Color of the task string. Shares the length of the task_text. If of length 1 the entire task bar shares the same color. task_font(string): Font of task string task_pos(tuple): position of task string task_height(float): height for task string task_text(string): text of the task bar # Info info_text(list[string]): Text list for information texts info_color(list[string]): Color of the information text string info_font(list[string]): Font of the information text string info_pos(list[tuple]): Position of the information text string info_height(list[float]): Height of the information text string # Stimuli stim_height(float): height of the stimuli object stim_pos(tuple): position of stimuli stim_font(string): font of the stimuli stim_inquiry(list[string]): list of elements to flash stim_colors(list[string]): list of colors for stimuli stim_timing(list[float]): timing for each letter flash """ self.window = window self.refresh_rate = window.getActualFrameRate() self.logger = logging.getLogger(__name__) self.stimuli_inquiry = stim_inquiry self.stimuli_colors = stim_colors self.stimuli_timing = stim_timing self.is_txt_stim = is_txt_stim self.staticPeriod = static_clock self.static_time = static_time self.experiment_clock = experiment_clock self.timing_clock = core.Clock() # Used to handle writing the marker stimulus self.marker_writer = marker_writer or NullMarkerWriter() # Length of the stimuli (number of flashes) self.stim_length = len(stim_inquiry) # Informational Parameters self.info_text = info_text # Stim parameters self.stimuli_font = stim_font self.stimuli_height = stim_height self.stimuli_pos = stim_pos # Trigger Items self.first_run = True self.trigger_type = trigger_type self.trigger_callback = TriggerCallback() # Callback used on presentation of first stimulus. 
self.first_stim_callback = lambda _sti: None self.size_list_sti = [] self.space_char = space_char self.task = visual.TextStim(win=self.window, color=task_color[0], height=task_height, text=task_text, font=task_font, pos=task_pos, wrapWidth=None, colorSpace='rgb', opacity=1, depth=-6.0) # Create multiple text objects based on input self.text = [] for idx in range(len(self.info_text)): self.text.append(visual.TextStim( win=self.window, color=info_color[idx], height=info_height[idx], text=self.info_text[idx], font=info_font[idx], pos=info_pos[idx], wrapWidth=None, colorSpace='rgb', opacity=1, depth=-6.0)) # Create Stimuli Object if self.is_txt_stim: self.sti = visual.TextStim( win=self.window, color='white', height=self.stimuli_height, text='+', font=self.stimuli_font, pos=self.stimuli_pos, wrapWidth=None, colorSpace='rgb', opacity=1, depth=-6.0) else: self.sti = visual.ImageStim( win=self.window, image=None, mask=None, pos=self.stimuli_pos, ori=0.0) def draw_static(self): """Draw static elements in a stimulus.""" self.task.draw() for idx in range(len(self.text)): self.text[idx].draw() def schedule_to(self, ele_list=[], time_list=[], color_list=[]): """Schedule stimuli elements (works as a buffer). Args: ele_list(list[string]): list of elements of stimuli time_list(list[float]): list of timings of stimuli color_list(list[string]): colors of elements of stimuli """ self.stimuli_inquiry = ele_list self.stimuli_timing = time_list self.stimuli_colors = color_list def update_task(self, text: str, color_list: List[str], pos: Tuple[float]): """Update Task Object. PARAMETERS: ----------- text: text for task color_list: list of the colors for each char pos: position of task """ self.task.text = text self.task.color = color_list[0] self.task.pos = pos def do_inquiry(self): """Do inquiry. Animates a inquiry of flashing letters to achieve RSVP. """ # init an array for timing information timing = [] if self.first_run: # play a inquiry start sound to help orient triggers first_stim_timing = _calibration_trigger( self.experiment_clock, trigger_type=self.trigger_type, display=self.window, on_trigger=self.marker_writer.push_marker) timing.append(first_stim_timing) self.first_stim_time = first_stim_timing[-1] self.first_run = False # generate a inquiry (list of stimuli with meta information) inquiry = self._generate_inquiry() # do the inquiry for idx in range(len(inquiry)): self.is_first_stim = (idx == 0) # set a static period to do all our stim setting. # will warn if ISI value is violated. self.staticPeriod.name = 'Stimulus Draw Period' self.staticPeriod.start(self.stimuli_timing[idx]) # Reset the timing clock to start presenting self.window.callOnFlip( self.trigger_callback.callback, self.experiment_clock, inquiry[idx]['sti_label']) self.window.callOnFlip(self.marker_writer.push_marker, inquiry[idx]['sti_label']) if idx == 0 and callable(self.first_stim_callback): self.first_stim_callback(inquiry[idx]['sti']) # Draw stimulus for n frames inquiry[idx]['sti'].draw() self.draw_static() self.window.flip() core.wait((inquiry[idx]['time_to_present'] - 1) / self.refresh_rate) # End static period self.staticPeriod.complete() # append timing information if self.is_txt_stim: timing.append(self.trigger_callback.timing) else: timing.append(self.trigger_callback.timing) self.trigger_callback.reset() # draw in static and flip once more self.draw_static() self.window.flip() return timing def _generate_inquiry(self): """Generate inquiry. Generate stimuli for next RSVP inquiry. 
""" stim_info = [] for idx in range(len(self.stimuli_inquiry)): current_stim = {} # turn ms timing into frames! Much more accurate! current_stim['time_to_present'] = int(self.stimuli_timing[idx] * self.refresh_rate) # check if stimulus needs to use a non-default size if self.size_list_sti: this_stimuli_size = self.size_list_sti[idx] else: this_stimuli_size = self.stimuli_height # Set the Stimuli attrs if self.stimuli_inquiry[idx].endswith('.png'): current_stim['sti'] = self.create_stimulus(mode='image', height_int=this_stimuli_size) current_stim['sti'].image = self.stimuli_inquiry[idx] current_stim['sti'].size = resize_image( current_stim['sti'].image, current_stim['sti'].win.size, this_stimuli_size) current_stim['sti_label'] = path.splitext( path.basename(self.stimuli_inquiry[idx]))[0] else: # text stimulus current_stim['sti'] = self.create_stimulus(mode='text', height_int=this_stimuli_size) txt = self.stimuli_inquiry[idx] # customize presentation of space char. current_stim['sti'].text = txt if txt != SPACE_CHAR else self.space_char current_stim['sti'].color = self.stimuli_colors[idx] current_stim['sti_label'] = txt # test whether the word will be too big for the screen text_width = current_stim['sti'].boundingBox[0] if text_width > self.window.size[0]: monitor_width, monitor_height = get_screen_resolution() text_height = current_stim['sti'].boundingBox[1] # If we are in full-screen, text size in Psychopy norm units # is monitor width/monitor height if self.window.size[0] == monitor_width: new_text_width = monitor_width / monitor_height else: # If not, text width is calculated relative to both # monitor size and window size new_text_width = ( self.window.size[1] / monitor_height) * ( monitor_width / monitor_height) new_text_height = (text_height * new_text_width) / text_width current_stim['sti'].height = new_text_height stim_info.append(current_stim) return stim_info def update_task_state(self, text: str, color_list: List[str]) -> None: """Update task state. Removes letters or appends to the right. Args: text(string): new text for task state color_list(list[string]): list of colors for each """ task_state_text = visual.TextStim( win=self.window, font=self.task.font, text=text) x_task_position = task_state_text.boundingBox[0] / \ self.window.size[0] - 1 task_pos = (x_task_position, 1 - self.task.height) self.update_task(text=text, color_list=color_list, pos=task_pos) def wait_screen(self, message, color): """Wait Screen. Args: message(string): message to be displayed while waiting """ # Construct the wait message wait_message = visual.TextStim(win=self.window, font=self.stimuli_font, text=message, height=.1, color=color, pos=(0, -.5), wrapWidth=2, colorSpace='rgb', opacity=1, depth=-6.0) # Try adding our BCI logo. Pass if not found. try: wait_logo = visual.ImageStim( self.window, image='bcipy/static/images/gui_images/bci_cas_logo.png', pos=(0, .5), mask=None, ori=0.0) wait_logo.size = resize_image( 'bcipy/static/images/gui_images/bci_cas_logo.png', self.window.size, 1) wait_logo.draw() except Exception: self.logger.debug('Cannot load logo image') pass # Draw and flip the screen. wait_message.draw() self.window.flip() def create_stimulus(self, height_int: int, mode: str = 'text'): """Create Stimulus. Returns a TextStim or ImageStim object. 
Args: height_int: The height of the stimulus mode: "text" or "image", determines which to return """ if mode == 'text': return visual.TextStim( win=self.window, color='white', height=height_int, text='+', font=self.stimuli_font, pos=self.stimuli_pos, wrapWidth=None, colorSpace='rgb', opacity=1, depth=-6.0) if mode == 'image': return visual.ImageStim( win=self.window, image=None, mask=None, units='', pos=self.stimuli_pos, size=(height_int, height_int), ori=0.0)
1.929688
2
cloud/db/db.py
bother3000/Smart-IoT-Planting-System
171
11503
#!/usr/bin/env python import pymysql #Python3 db = pymysql.connect("localhost","sips","root","zaijian" ) cursor = db.cursor() cursor.execute("SELECT VERSION()") data = cursor.fetchone() print ("Database version : %s " % data) db.close() def create_table(): db = pymysql.connect("localhost","sips","root","zaijian" ) cursor = db.cursor() cursor.execute("DROP TABLE IF EXISTS EMPLOYEE") sql = """CREATE TABLE EMPLOYEE ( FIRST_NAME CHAR(20) NOT NULL, LAST_NAME CHAR(20), AGE INT, SEX CHAR(1), INCOME FLOAT )""" cursor.execute(sql) db.close() def db_insert(): db = pymysql.connect("localhost","sips","root","zaijian" ) cursor = db.cursor() sql = """INSERT INTO EMPLOYEE(FIRST_NAME, LAST_NAME, AGE, SEX, INCOME) VALUES ('Mac', 'Mohan', 20, 'M', 2000)""" try: cursor.execute(sql) db.commit() except: db.rollback() db.close()
3.28125
3
sdk/python/pulumi_aws_native/amplify/_inputs.py
AaronFriel/pulumi-aws-native
29
11504
<gh_stars>10-100 # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from ._enums import * __all__ = [ 'AppAutoBranchCreationConfigArgs', 'AppBasicAuthConfigArgs', 'AppCustomRuleArgs', 'AppEnvironmentVariableArgs', 'AppTagArgs', 'BranchBasicAuthConfigArgs', 'BranchEnvironmentVariableArgs', 'BranchTagArgs', 'DomainSubDomainSettingArgs', ] @pulumi.input_type class AppAutoBranchCreationConfigArgs: def __init__(__self__, *, auto_branch_creation_patterns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, basic_auth_config: Optional[pulumi.Input['AppBasicAuthConfigArgs']] = None, build_spec: Optional[pulumi.Input[str]] = None, enable_auto_branch_creation: Optional[pulumi.Input[bool]] = None, enable_auto_build: Optional[pulumi.Input[bool]] = None, enable_performance_mode: Optional[pulumi.Input[bool]] = None, enable_pull_request_preview: Optional[pulumi.Input[bool]] = None, environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input['AppEnvironmentVariableArgs']]]] = None, pull_request_environment_name: Optional[pulumi.Input[str]] = None, stage: Optional[pulumi.Input['AppAutoBranchCreationConfigStage']] = None): if auto_branch_creation_patterns is not None: pulumi.set(__self__, "auto_branch_creation_patterns", auto_branch_creation_patterns) if basic_auth_config is not None: pulumi.set(__self__, "basic_auth_config", basic_auth_config) if build_spec is not None: pulumi.set(__self__, "build_spec", build_spec) if enable_auto_branch_creation is not None: pulumi.set(__self__, "enable_auto_branch_creation", enable_auto_branch_creation) if enable_auto_build is not None: pulumi.set(__self__, "enable_auto_build", enable_auto_build) if enable_performance_mode is not None: pulumi.set(__self__, "enable_performance_mode", enable_performance_mode) if enable_pull_request_preview is not None: pulumi.set(__self__, "enable_pull_request_preview", enable_pull_request_preview) if environment_variables is not None: pulumi.set(__self__, "environment_variables", environment_variables) if pull_request_environment_name is not None: pulumi.set(__self__, "pull_request_environment_name", pull_request_environment_name) if stage is not None: pulumi.set(__self__, "stage", stage) @property @pulumi.getter(name="autoBranchCreationPatterns") def auto_branch_creation_patterns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "auto_branch_creation_patterns") @auto_branch_creation_patterns.setter def auto_branch_creation_patterns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "auto_branch_creation_patterns", value) @property @pulumi.getter(name="basicAuthConfig") def basic_auth_config(self) -> Optional[pulumi.Input['AppBasicAuthConfigArgs']]: return pulumi.get(self, "basic_auth_config") @basic_auth_config.setter def basic_auth_config(self, value: Optional[pulumi.Input['AppBasicAuthConfigArgs']]): pulumi.set(self, "basic_auth_config", value) @property @pulumi.getter(name="buildSpec") def build_spec(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "build_spec") @build_spec.setter def build_spec(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "build_spec", value) @property @pulumi.getter(name="enableAutoBranchCreation") def enable_auto_branch_creation(self) -> 
Optional[pulumi.Input[bool]]: return pulumi.get(self, "enable_auto_branch_creation") @enable_auto_branch_creation.setter def enable_auto_branch_creation(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_auto_branch_creation", value) @property @pulumi.getter(name="enableAutoBuild") def enable_auto_build(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "enable_auto_build") @enable_auto_build.setter def enable_auto_build(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_auto_build", value) @property @pulumi.getter(name="enablePerformanceMode") def enable_performance_mode(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "enable_performance_mode") @enable_performance_mode.setter def enable_performance_mode(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_performance_mode", value) @property @pulumi.getter(name="enablePullRequestPreview") def enable_pull_request_preview(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "enable_pull_request_preview") @enable_pull_request_preview.setter def enable_pull_request_preview(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_pull_request_preview", value) @property @pulumi.getter(name="environmentVariables") def environment_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AppEnvironmentVariableArgs']]]]: return pulumi.get(self, "environment_variables") @environment_variables.setter def environment_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AppEnvironmentVariableArgs']]]]): pulumi.set(self, "environment_variables", value) @property @pulumi.getter(name="pullRequestEnvironmentName") def pull_request_environment_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "pull_request_environment_name") @pull_request_environment_name.setter def pull_request_environment_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "pull_request_environment_name", value) @property @pulumi.getter def stage(self) -> Optional[pulumi.Input['AppAutoBranchCreationConfigStage']]: return pulumi.get(self, "stage") @stage.setter def stage(self, value: Optional[pulumi.Input['AppAutoBranchCreationConfigStage']]): pulumi.set(self, "stage", value) @pulumi.input_type class AppBasicAuthConfigArgs: def __init__(__self__, *, enable_basic_auth: Optional[pulumi.Input[bool]] = None, password: Optional[pulumi.Input[str]] = None, username: Optional[pulumi.Input[str]] = None): if enable_basic_auth is not None: pulumi.set(__self__, "enable_basic_auth", enable_basic_auth) if password is not None: pulumi.set(__self__, "password", password) if username is not None: pulumi.set(__self__, "username", username) @property @pulumi.getter(name="enableBasicAuth") def enable_basic_auth(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "enable_basic_auth") @enable_basic_auth.setter def enable_basic_auth(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_basic_auth", value) @property @pulumi.getter def password(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "password") @password.setter def password(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "password", value) @property @pulumi.getter def username(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "username") @username.setter def username(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "username", value) @pulumi.input_type class AppCustomRuleArgs: def __init__(__self__, *, source: pulumi.Input[str], 
target: pulumi.Input[str], condition: Optional[pulumi.Input[str]] = None, status: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "source", source) pulumi.set(__self__, "target", target) if condition is not None: pulumi.set(__self__, "condition", condition) if status is not None: pulumi.set(__self__, "status", status) @property @pulumi.getter def source(self) -> pulumi.Input[str]: return pulumi.get(self, "source") @source.setter def source(self, value: pulumi.Input[str]): pulumi.set(self, "source", value) @property @pulumi.getter def target(self) -> pulumi.Input[str]: return pulumi.get(self, "target") @target.setter def target(self, value: pulumi.Input[str]): pulumi.set(self, "target", value) @property @pulumi.getter def condition(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "condition") @condition.setter def condition(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "condition", value) @property @pulumi.getter def status(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "status") @status.setter def status(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "status", value) @pulumi.input_type class AppEnvironmentVariableArgs: def __init__(__self__, *, name: pulumi.Input[str], value: pulumi.Input[str]): pulumi.set(__self__, "name", name) pulumi.set(__self__, "value", value) @property @pulumi.getter def name(self) -> pulumi.Input[str]: return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def value(self) -> pulumi.Input[str]: return pulumi.get(self, "value") @value.setter def value(self, value: pulumi.Input[str]): pulumi.set(self, "value", value) @pulumi.input_type class AppTagArgs: def __init__(__self__, *, key: pulumi.Input[str], value: pulumi.Input[str]): pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> pulumi.Input[str]: return pulumi.get(self, "key") @key.setter def key(self, value: pulumi.Input[str]): pulumi.set(self, "key", value) @property @pulumi.getter def value(self) -> pulumi.Input[str]: return pulumi.get(self, "value") @value.setter def value(self, value: pulumi.Input[str]): pulumi.set(self, "value", value) @pulumi.input_type class BranchBasicAuthConfigArgs: def __init__(__self__, *, password: pulumi.Input[str], username: pulumi.Input[str], enable_basic_auth: Optional[pulumi.Input[bool]] = None): pulumi.set(__self__, "password", password) pulumi.set(__self__, "username", username) if enable_basic_auth is not None: pulumi.set(__self__, "enable_basic_auth", enable_basic_auth) @property @pulumi.getter def password(self) -> pulumi.Input[str]: return pulumi.get(self, "password") @password.setter def password(self, value: pulumi.Input[str]): pulumi.set(self, "password", value) @property @pulumi.getter def username(self) -> pulumi.Input[str]: return pulumi.get(self, "username") @username.setter def username(self, value: pulumi.Input[str]): pulumi.set(self, "username", value) @property @pulumi.getter(name="enableBasicAuth") def enable_basic_auth(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "enable_basic_auth") @enable_basic_auth.setter def enable_basic_auth(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_basic_auth", value) @pulumi.input_type class BranchEnvironmentVariableArgs: def __init__(__self__, *, name: pulumi.Input[str], value: pulumi.Input[str]): pulumi.set(__self__, "name", name) pulumi.set(__self__, "value", value) @property 
@pulumi.getter def name(self) -> pulumi.Input[str]: return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def value(self) -> pulumi.Input[str]: return pulumi.get(self, "value") @value.setter def value(self, value: pulumi.Input[str]): pulumi.set(self, "value", value) @pulumi.input_type class BranchTagArgs: def __init__(__self__, *, key: pulumi.Input[str], value: pulumi.Input[str]): pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> pulumi.Input[str]: return pulumi.get(self, "key") @key.setter def key(self, value: pulumi.Input[str]): pulumi.set(self, "key", value) @property @pulumi.getter def value(self) -> pulumi.Input[str]: return pulumi.get(self, "value") @value.setter def value(self, value: pulumi.Input[str]): pulumi.set(self, "value", value) @pulumi.input_type class DomainSubDomainSettingArgs: def __init__(__self__, *, branch_name: pulumi.Input[str], prefix: pulumi.Input[str]): pulumi.set(__self__, "branch_name", branch_name) pulumi.set(__self__, "prefix", prefix) @property @pulumi.getter(name="branchName") def branch_name(self) -> pulumi.Input[str]: return pulumi.get(self, "branch_name") @branch_name.setter def branch_name(self, value: pulumi.Input[str]): pulumi.set(self, "branch_name", value) @property @pulumi.getter def prefix(self) -> pulumi.Input[str]: return pulumi.get(self, "prefix") @prefix.setter def prefix(self, value: pulumi.Input[str]): pulumi.set(self, "prefix", value)
1.5625
2
awsecommerceservice/models/item_lookup_request.py
nidaizamir/Test-PY
0
11505
# -*- coding: utf-8 -*- """ awsecommerceservice This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ). """ class ItemLookupRequest(object): """Implementation of the 'ItemLookupRequest' model. TODO: type model description here. Attributes: condition (ConditionEnum): TODO: type description here. id_type (IdTypeEnum): TODO: type description here. merchant_id (string): TODO: type description here. item_id (list of string): TODO: type description here. response_group (list of string): TODO: type description here. search_index (string): TODO: type description here. variation_page (object): TODO: type description here. related_item_page (object): TODO: type description here. relationship_type (list of string): TODO: type description here. include_reviews_summary (string): TODO: type description here. truncate_reviews_at (int): TODO: type description here. """ # Create a mapping from Model property names to API property names _names = { "condition":'Condition', "id_type":'IdType', "merchant_id":'MerchantId', "item_id":'ItemId', "response_group":'ResponseGroup', "search_index":'SearchIndex', "variation_page":'VariationPage', "related_item_page":'RelatedItemPage', "relationship_type":'RelationshipType', "include_reviews_summary":'IncludeReviewsSummary', "truncate_reviews_at":'TruncateReviewsAt' } def __init__(self, condition=None, id_type=None, merchant_id=None, item_id=None, response_group=None, search_index=None, variation_page=None, related_item_page=None, relationship_type=None, include_reviews_summary=None, truncate_reviews_at=None): """Constructor for the ItemLookupRequest class""" # Initialize members of the class self.condition = condition self.id_type = id_type self.merchant_id = merchant_id self.item_id = item_id self.response_group = response_group self.search_index = search_index self.variation_page = variation_page self.related_item_page = related_item_page self.relationship_type = relationship_type self.include_reviews_summary = include_reviews_summary self.truncate_reviews_at = truncate_reviews_at @classmethod def from_dictionary(cls, dictionary): """Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class. """ if dictionary is None: return None # Extract variables from the dictionary condition = dictionary.get('Condition') id_type = dictionary.get('IdType') merchant_id = dictionary.get('MerchantId') item_id = dictionary.get('ItemId') response_group = dictionary.get('ResponseGroup') search_index = dictionary.get('SearchIndex') variation_page = dictionary.get('VariationPage') related_item_page = dictionary.get('RelatedItemPage') relationship_type = dictionary.get('RelationshipType') include_reviews_summary = dictionary.get('IncludeReviewsSummary') truncate_reviews_at = dictionary.get('TruncateReviewsAt') # Return an object of this model return cls(condition, id_type, merchant_id, item_id, response_group, search_index, variation_page, related_item_page, relationship_type, include_reviews_summary, truncate_reviews_at)
2.046875
2
data.py
zhaoyun630/R-NET-in-Keras
207
11506
<filename>data.py from __future__ import absolute_import from __future__ import print_function from __future__ import division import numpy as np import cPickle as pickle from keras import backend as K from keras.utils import np_utils from keras.preprocessing import sequence from random import shuffle import itertools def load_dataset(filename): with open(filename, 'rb') as f: return pickle.load(f) def padded_batch_input(input, indices=None, dtype=K.floatx(), maxlen=None): if indices is None: indices = np.arange(len(input)) batch_input = [input[i] for i in indices] return sequence.pad_sequences(batch_input, maxlen, dtype, padding='post') def categorical_batch_target(target, classes, indices=None, dtype=K.floatx()): if indices is None: indices = np.arange(len(target)) batch_target = [min(target[i], classes-1) for i in indices] return np_utils.to_categorical(batch_target, classes).astype(dtype) def lengthGroup(length): if length < 150: return 0 if length < 240: return 1 if length < 380: return 2 if length < 520: return 3 if length < 660: return 4 return 5 class BatchGen(object): def __init__(self, inputs, targets=None, batch_size=None, stop=False, shuffle=True, balance=False, dtype=K.floatx(), flatten_targets=False, sort_by_length=False, group=False, maxlen=None): assert len(set([len(i) for i in inputs])) == 1 assert(not shuffle or not sort_by_length) self.inputs = inputs self.nb_samples = len(inputs[0]) self.batch_size = batch_size if batch_size else self.nb_samples self.dtype = dtype self.stop = stop self.shuffle = shuffle self.balance = balance self.targets = targets self.flatten_targets = flatten_targets if isinstance(maxlen, (list, tuple)): self.maxlen = maxlen else: self.maxlen = [maxlen] * len(inputs) self.sort_by_length = None if sort_by_length: self.sort_by_length = np.argsort([-len(p) for p in inputs[0]]) # if self.targets and self.balance: # self.class_weight = class_weight(self.targets) self.generator = self._generator() self._steps = -(-self.nb_samples // self.batch_size) # round up self.groups = None if group is not False: indices = np.arange(self.nb_samples) ff = lambda i: lengthGroup(len(inputs[0][i])) indices = np.argsort([ff(i) for i in indices]) self.groups = itertools.groupby(indices, ff) self.groups = {k: np.array(list(v)) for k, v in self.groups} def _generator(self): while True: if self.shuffle: permutation = np.random.permutation(self.nb_samples) elif self.sort_by_length is not None: permutation = self.sort_by_length elif self.groups is not None: # permutation = np.arange(self.nb_samples) # tmp = permutation.copy() # for id in self.group_ids: # mask = (self.groups==id) # tmp[mask] = np.random.permutation(permutation[mask]) # permutation = tmp # import ipdb # ipdb.set_trace() for k, v in self.groups.items(): np.random.shuffle(v) tmp = np.concatenate(self.groups.values()) batches = np.array_split(tmp, self._steps) remainder = [] if len(batches[-1]) < self._steps: remainder = batches[-1:] batches = batches[:-1] shuffle(batches) batches += remainder permutation = np.concatenate(batches) else: permutation = np.arange(self.nb_samples) i = 0 longest = 767 while i < self.nb_samples: if self.sort_by_length is not None: bs = self.batch_size * 767 // self.inputs[0][permutation[i]].shape[0] else: bs = self.batch_size indices = permutation[i : i + bs] i = i + bs # for i in range(0, self.nb_samples, self.batch_size): # indices = permutation[i : i + self.batch_size] batch_X = [padded_batch_input(x, indices, self.dtype, maxlen) for x, maxlen in zip(self.inputs, self.maxlen)] P = 
batch_X[0].shape[1] if not self.targets: yield batch_X continue batch_Y = [categorical_batch_target(target, P, indices, self.dtype) for target in self.targets] if self.flatten_targets: batch_Y = np.concatenate(batch_Y, axis=-1) if not self.balance: yield (batch_X, batch_Y) continue # batch_W = np.array([self.class_weight[y] for y in batch_targets]) batch_W = np.array([bs / self.batch_size for x in batch_X[0]]).astype(self.dtype) yield (batch_X, batch_Y, batch_W) if self.stop: raise StopIteration def __iter__(self): return self.generator def next(self): return self.generator.next() def __next__(self): return self.generator.__next__() def steps(self): if self.sort_by_length is None: return self._steps print("Steps was called") if self.shuffle: permutation = np.random.permutation(self.nb_samples) elif self.sort_by_length is not None: permutation = self.sort_by_length else: permutation = np.arange(self.nb_samples) i = 0 longest = 767 self._steps = 0 while i < self.nb_samples: bs = self.batch_size * 767 // self.inputs[0][permutation[i]].shape[0] i = i + bs self._steps += 1 return self._steps batch_gen = BatchGen # for backward compatibility
2.703125
3
algoritmos/ajuste-curvas/caso-linear/Teste.py
mauriciomoccelin/metodos-numericos
3
11507
<gh_stars>1-10 from RegressaoLinear import RegressaoLinear planoCartesiano = { 0.5: 4.4, 2.8: 1.8, 4.2: 1.0, 6.7: 0.4, 8.3: 0.2 } regressaoLinear = RegressaoLinear(planoCartesiano) print(regressaoLinear.gerar_equacao())
2.078125
2
src/app.py
UBC-MDS/dsci_532_group19
0
11508
<gh_stars>0 import dash from dash import html from dash import dcc import dash_bootstrap_components as dbc from dash.dependencies import Input, Output from .layout import * from .plot import * # from layout import * # from plot import * app = dash.Dash( __name__, external_stylesheets=[dbc.themes.BOOTSTRAP, "/css/button.css"] ) app.title = "Data Science Salaries" server = app.server app.layout = html.Div( [ dcc.Location(id="url", refresh=False), topbar, content, # sidebar, ] ) @app.callback(Output("scatter", "srcDoc"), Input("data_scientist", "value")) def update(DS_identity): rst = plot_sidebar(DS_identity) return rst @app.callback( Output("world_map", "srcDoc"), [Input("select-country", "value")], ) def update(xcon): return plot_map(xcon) @app.callback( Output("salary_heatmap", "srcDoc"), [Input("xslider_1", "value"), Input("select-country", "value")], ) def update(xmax, xcon): return plot_salary_heatmap(xmax, xcon) @app.callback( Output("gender-boxplot", "srcDoc"), [Input("xslider_1", "value"), Input("select-country", "value")], ) def update(xmax, xcon): return plot_gender_boxplot(xmax, xcon) @app.callback( Output("edu_histogram", "srcDoc"), [ Input("xslider_1", "value"), Input("select-country", "value"), Input("select-stacking", "value") ] ) def update(xmax, xcon, stack): return plot_edu_histo(xmax, xcon, stack) if __name__ == "__main__": app.run_server(debug=True)
2.328125
2
jaeger-cli/rpc.py
shwsun/jaeger-cli
0
11509
# Copyright 2018 MassOpenCloud. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'RequestContextSerializer', 'get_client', 'get_server', 'get_notifier', 'TRANSPORT_ALIASES', ] import functools from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_service import periodic_task from oslo_utils import importutils from oslo_utils import timeutils import nova.conf import nova.context import nova.exception from nova.i18n import _ from nova import objects profiler = importutils.try_import("osprofiler.profiler") CONF = nova.conf.CONF LOG = logging.getLogger(__name__) TRANSPORT = None LEGACY_NOTIFIER = None NOTIFICATION_TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ nova.exception.__name__, ] EXTRA_EXMODS = [] # NOTE(markmc): The nova.openstack.common.rpc entries are for backwards compat # with Havana rpc_backend configuration values. The nova.rpc entries are for # compat with Essex values. TRANSPORT_ALIASES = { 'nova.openstack.common.rpc.impl_kombu': 'rabbit', 'nova.openstack.common.rpc.impl_qpid': 'qpid', 'nova.openstack.common.rpc.impl_zmq': 'zmq', 'nova.rpc.impl_kombu': 'rabbit', 'nova.rpc.impl_qpid': 'qpid', 'nova.rpc.impl_zmq': 'zmq', } class RequestContextSerializer(messaging.Serializer): """Request context serializer and deserializer from Nova.rpc. This is the original serializer from nova. Nothing is changed besides the docstring. """ def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): return context.to_dict() def deserialize_context(self, context): return nova.context.RequestContext.from_dict(context) class ProfilerRequestContextSerializer(RequestContextSerializer): """Serializer and deserializer impl. Serializer and deserializer impl based on Jaeger tracing metadata propagation. For usage check out docs/how-to.md . This is the only impl that is important. """ pass
1.3125
1
data_structure/tree/test_binarytree.py
lp1225/my_algorithm
1
11510
<reponame>lp1225/my_algorithm # 搜索二叉树 from queue import Queue class Node(object): """节点""" def __init__(self, data): self.data = data self.node_left = None self.node_right = None class BinaryTree(object): def __init__(self): self.root = None def insert(tree, node): """插入节点""" if tree.root == None: tree.root = node else: temp = tree.root # 必须要有一个临时节点 while temp != None: if temp.data > node.data: if temp.node_left == None: temp.node_left = node return else: temp = temp.node_left else: if temp.node_right == None: temp.node_right = node return else: temp = temp.node_right def preorder(node): """先序遍历""" if node != None: print(node.data, end='') preorder(node.node_left) preorder(node.node_right) def inorder(node): """中序遍历""" if node != None: inorder(node.node_left) print(node.data, end='') inorder(node.node_right) def postorder(node): """后序遍历""" if node != None: postorder(node.node_left) postorder(node.node_right) print(node.data, end='') def get_height(node): """得到最大高度k""" if node == None: return 0 max_left = get_height(node.node_left) max_right = get_height(node.node_right) max_value = max(max_left, max_right) return max_value+1 def get_node(node, k): """得到k层的节点""" if node == None: return if k == 1: if node.data !=None: print(node.data, end='') get_node(node.node_left, k-1) get_node(node.node_right, k-1) def get_max(node): """查找最大值 在右子树中找 """ if node != None: while node.node_right != None: node = node.node_right return node.data def get_min(node): """查找最小值""" if node != None: while node.node_left != None: node = node.node_left return node.data def comorder(node): q = Queue() q.put(node) if node == None: return else: while q.empty() != True: node = q.get(0) print(node.data, end='') if node.node_left != None: q.put(node.node_left) if node.node_right != None: q.put(node.node_right) def Mirror(node): """反转二叉树, 顺序执行,nice """ if node == None: return if node.node_left == None and node.node_right == None: return temp = node.node_left node.node_left = node.node_right node.node_right = temp Mirror(node.node_left) Mirror(node.node_right) if __name__ == '__main__': tree = BinaryTree() arr_test = [6, 3, 8, 2, 5, 1, 7] for i in arr_test: insert(tree, Node(i)) # preorder(tree.root) # print() # inorder(tree.root) # print() # get_node(tree.root, 3) # print() # result = get_height(tree.root) # print(result) # max_value = get_max(tree.root) # print(max_value) # min_value = get_min(tree.root) # print(min_value) comorder(tree.root) Mirror(tree.root) print() comorder(tree.root)
4.15625
4
Python-desenvolvimento/ex036.py
MarcosMaciel-MMRS/Desenvolvimento-python
0
11511
<gh_stars>0 #empréstimos bancários. pegue o valor da casa, o salario da pessoa e em quanto tempo ela quer pagar. #se as parcelas ficarem acima de 30% do salario, negue o imprestimo. casa = float(input('Informe o valor da casa: R$')) salario = float(input('informe seu salario: R$')) tempo = int(input('Em quanto tempo planeja pagar: ')) parcela = casa/(tempo*12)#para fazer a conta com base em anos, levando em conta as parcelas mensais. print('Para pagar um casa de R${:.2f} e em {}anos, suas parcelas ficariam de R${:.2f}'.format(casa, tempo, parcela)) if parcela >= (salario*30/100): print('Com seu salário atual, não é possível efetuar esse empréstimo.') else: print('Empréstimo aprovado')
3.828125
4
UDEMY-Learn Python Programming Masterclass/Section 3-Stepping into the World of Python/exercise4.py
Sanjay9921/Python
0
11512
#Integer division #You have a shop selling buns for $2.40 each. A customer comes in with $15, and would like to buy as many buns as possible. #Complete the code to calculate how many buns the customer can afford. #Note: Your customer won't be happy if you try to sell them part of a bun. #Print only the result, any other text in the output will cause the checker to fail. bun_price = 2.40 money = 15 print( money // bun_price )
3.984375
4
scripts/test_maths.py
paulscottrobson/Basic65816
0
11513
# ******************************************************************************************* # ******************************************************************************************* # # Name : test_maths.py # Purpose : Create lots of variables/arrays and arithmetic/bitwise. # Date : 10th June 2019 # Author : <NAME> (<EMAIL>) # # ******************************************************************************************* # ******************************************************************************************* import random from variables import * def calculate(op,a,b): if op == "+": return a + b if op == "-": return a - b if op == "*": return a * b if op == "%": return a % b if op == "/": return int(a / b) if op == "&": return a & b if op == "|": return a | b if op == "^": return a ^ b assert False if __name__ == "__main__": print("Arithmetic/Bitwise test code.") operators = "+,-,*,/,&,|,^".split(",") eb = EntityBucket(-1,60,0,10,0) # bs = BasicSource() bs.append(eb.setupCode()) bs.append(eb.assignCode()) for i in range(0,900): ok = False while not ok: v1 = eb.pickOne() v2 = eb.pickOne() operator = operators[random.randint(0,len(operators)-1)] ok = True if abs(v1.getValue()*v2.getValue()) >= 32768*4096: ok = False if (operator == "/" or operator == "%") and v2.getValue() == 0: ok = False r = calculate(operator,v1.getValue(),v2.getValue()) bs.append("assert ({0}{1}{2}) = {3}".format(v1.getEither(),operator,v2.getEither(),r)) bs.append(eb.checkCode()) bs.save() # blk = BasicBlock(0x4000,0x8000) blk.setBoot("run",False) blk.loadProgram() blk.exportFile("temp/basic.bin")
3.25
3
qa/char_analyze.py
JinkelaCrops/t2t-learning
5
11514
<gh_stars>1-10 # 所有的unicode字符 from collections import Counter import qa.regex_utils as regutil import re resource_path = "/media/tmxmall/a36811aa-0e87-4ba1-b14f-370134452449/data/medicine.txt" with open(resource_path, "r", encoding="utf8") as f: char_stream = f.read() char_dictionary = Counter(list(char_stream)) med_unicodes = regutil.expr_converter("[[%s]]" % "".join(char_dictionary.keys()).replace("\n", "") + "#[[\\u4e00-\\u9fff]]") format_med_unicodes = re.sub("(?<!-)(?=\\\\u)", "\n", med_unicodes) print(format_med_unicodes) lines = char_stream.split("\n") unknown_char = "[^\\u0020-\\u007e\\u4e00-\\u9fff]" def regex_filter_line(regex, lines): filter_sentence = list(filter(lambda x: re.search(regex, x) is not None, lines)) print("%20s" % regex, len(filter_sentence)) return len(filter_sentence) regutil.uu_enum("\\u0020-\\u007e") regex_filter_line("[\\u0020-\\u007e]", lines) regex_filter_line("[\\u00a0-\\u00ff]", lines) regex_filter_line("[\\u0100-\\u01ff]", lines) regex_filter_line("[\\u0251]", lines) regex_filter_line("[\\u025b]", lines) regex_filter_line("[\\u0261]", lines) regex_filter_line("[\\u028a]", lines) regex_filter_line("[\\u02c6-\\u02cb]", lines) regex_filter_line("[\\u02d0]", lines) regex_filter_line("[\\u02d8-\\u02da]", lines) regex_filter_line("[\\u02dc]", lines) regex_filter_line("[\\u037a]", lines) regex_filter_line("[\\u037e]", lines) regex_filter_line("[\\u038a]", lines) regex_filter_line("[\\u038c]", lines) regex_filter_line("[\\u03cb]", lines) regex_filter_line("[\\u03d6]", lines) regex_filter_line("[\\u0384-\\u0385]", lines) regex_filter_line("[\\u0387-\\u0388]", lines) regex_filter_line("[\\u038e-\\u038f]", lines) regex_filter_line("[\\u0391-\\u03c9]", lines) regex_filter_line("[\\u0400-\\u04ff]", lines) regex_filter_line("[\\u0590-\\u05ff]", lines) regex_filter_line("[\\u0652]", lines) regex_filter_line("[\\u11bc]", lines) regex_filter_line("[\\u1868]", lines) regex_filter_line("[\\u1d31]", lines) regex_filter_line("[\\u1d52]", lines) regex_filter_line("[\\u1d5b]", lines) regex_filter_line("[\\u1ef7]", lines) regex_filter_line("[\\u2016-\\u206a]", lines) regex_filter_line("[\\u2070]", lines) regex_filter_line("[\\u2074-\\u2075]", lines) regex_filter_line("[\\u2077-\\u2078]", lines) regex_filter_line("[\\u2082-\\u2084]", lines) regex_filter_line("[\\u20ac]", lines) regex_filter_line("[\\u2103]", lines) regex_filter_line("[\\u2105]", lines) regex_filter_line("[\\u2109]", lines) regex_filter_line("[\\u2116]", lines) regex_filter_line("[\\u2122]", lines) regex_filter_line("[\\u212b]", lines) regex_filter_line("[\\u2160-\\u216b]", lines) regex_filter_line("[\\u2170-\\u2179]", lines) regex_filter_line("[\\u21d2]", lines) regex_filter_line("[\\u2190-\\u2193]", lines) regex_filter_line("[\\u2206]", lines) regex_filter_line("[\\u2208]", lines) regex_filter_line("[\\u2211-\\u2212]", lines) regex_filter_line("[\\u2217-\\u221a]", lines) regex_filter_line("[\\u221d-\\u2220]", lines) regex_filter_line("[\\u2223]", lines) regex_filter_line("[\\u2225]", lines) regex_filter_line("[\\u2227-\\u222b]", lines) regex_filter_line("[\\u222e]", lines) regex_filter_line("[\\u2234]", lines) regex_filter_line("[\\u2237]", lines) regex_filter_line("[\\u223c-\\u223d]", lines) regex_filter_line("[\\u2245]", lines) regex_filter_line("[\\u224c]", lines) regex_filter_line("[\\u2252]", lines) regex_filter_line("[\\u2260-\\u2261]", lines) regex_filter_line("[\\u2264-\\u2267]", lines) regex_filter_line("[\\u226f]", lines) regex_filter_line("[\\u2295]", lines) regex_filter_line("[\\u2299]", lines) 
regex_filter_line("[\\u22a5]", lines) regex_filter_line("[\\u22bf]", lines) regex_filter_line("[\\u2312]", lines) regex_filter_line("[\\u2395]", lines) regex_filter_line("[\\u2460-\\u2473]", lines) regex_filter_line("[\\u2474-\\u2487]", lines) regex_filter_line("[\\u2488-\\u249b]", lines) regex_filter_line("[\\u2500-\\u257f]", lines) regex_filter_line("[\\u25a0-\\u25a1]", lines) regex_filter_line("[\\u25b2-\\u25b4]", lines) regex_filter_line("[\\u25c6-\\u25c7]", lines) regex_filter_line("[\\u25ca-\\u25cb]", lines) regex_filter_line("[\\u25ce-\\u25cf]", lines) regex_filter_line("[\\u2605-\\u2606]", lines) regex_filter_line("[\\u2609]", lines) regex_filter_line("[\\u2610]", lines) regex_filter_line("[\\u2640]", lines) regex_filter_line("[\\u2642]", lines) regex_filter_line("[\\u2666]", lines) regex_filter_line("[\\u266a-\\u266b]", lines) regex_filter_line("[\\u2714]", lines) regex_filter_line("[\\u2717]", lines) regex_filter_line("[\\u274f]", lines) regex_filter_line("[\\u2751]", lines) regex_filter_line("[\\u279f]", lines) regex_filter_line("[\\u27a2]", lines) regex_filter_line("[\\u27a5]", lines) regex_filter_line("[\\u2a7d]", lines) regex_filter_line("[\\u2fd4]", lines) regex_filter_line("[\\u3001-\\u301e]", lines) regex_filter_line("[\\u3022-\\u3025]", lines) regex_filter_line("[\\u3105-\\u3107]", lines) regex_filter_line("[\\u310a]", lines) regex_filter_line("[\\u3111]", lines) regex_filter_line("[\\u3113]", lines) regex_filter_line("[\\u3116-\\u3117]", lines) regex_filter_line("[\\u311a-\\u311b]", lines) regex_filter_line("[\\u3122]", lines) regex_filter_line("[\\u3125]", lines) regex_filter_line("[\\u3127-\\u3128]", lines) regex_filter_line("[\\u3220-\\u3229]", lines) regex_filter_line("[\\u32a3]", lines) regex_filter_line("[\\u338e-\\u338f]", lines) regex_filter_line("[\\u339c-\\u339d]", lines) regex_filter_line("[\\u33a1]", lines) regex_filter_line("[\\u33a5]", lines) regex_filter_line("[\\u33d5]", lines) regex_filter_line("[\\u33d1-\\u33d2]", lines) regex_filter_line("[\\u359e]", lines) regex_filter_line("[\\u39d1]", lines) regex_filter_line("[\\u41f2]", lines) regex_filter_line("[\\u4341]", lines) regex_filter_line("[\\u4d13]", lines) regex_filter_line("[\\u4d15]", lines) regex_filter_line("[\\u4e00-\\u9fff]", lines) regex_filter_line("[\\uacf3]", lines) regex_filter_line("[\\ucd38]", lines) regex_filter_line("[\\ue20c-\\ue2ff]", lines) regex_filter_line("[\\uf900-\\ufaff]", lines) regex_filter_line("[\\ufb03]", lines) regex_filter_line("[\\ufe30-\\ufe31]", lines) regex_filter_line("[\\ufe33]", lines) regex_filter_line("[\\ufe38]", lines) regex_filter_line("[\\ufe3c-\\ufe3d]", lines) regex_filter_line("[\\ufe3f-\\ufe41]", lines) regex_filter_line("[\\ufe4d-\\ufe4e]", lines) regex_filter_line("[\\ufe55-\\ufe57]", lines) regex_filter_line("[\\ufe59-\\ufe5c]", lines) regex_filter_line("[\\ufe5f]", lines) regex_filter_line("[\\ufe63]", lines) regex_filter_line("[\\ufe65-\\ufe66]", lines) regex_filter_line("[\\ufe6a-\\ufe6b]", lines) regex_filter_line("[\\ufeff]", lines) regex_filter_line("[\\uff01]", lines) regex_filter_line("[\\uff08-\\uff09]", lines) regex_filter_line("[\\uff0c]", lines) regex_filter_line("[\\uff1a]", lines) regex_filter_line("[\\uff1f]", lines) regex_filter_line("[\\uff61]", lines) regex_filter_line("[\\uff63]", lines) regex_filter_line("[\\uff65]", lines) regex_filter_line("[\\uff6c]", lines) regex_filter_line("[\\uff72]", lines) regex_filter_line("[\\uff86]", lines) regex_filter_line("[\\uff89]", lines) regex_filter_line("[\\uffe0-\\uffe1]", lines) 
regex_filter_line("[\\uffe3]", lines) regex_filter_line("[\\uffe5]", lines) regex_filter_line("[\\uffed]", lines) regex_filter_line("[\\ufffc]", lines) """ [\u0020-\u007e] 13056272 \\u0020-\\u007e Latin [\u00a0-\u00ff] 258619 \\u00a0-\\u00ff Latin ++ [\u0100-\u01ff] 353 \\u0100-\\u01ff Latin ++ [\u0251] 302 \\u0251 ɑ [\u025b] 2 \\u025b ɛ [\u0261] 25 \\u0261 ɡ [\u028a] 1 \\u028a ʊ [\u02c6-\u02cb] 870 \\u02c6-\\u02cb ˆˇˈˉˊˋ [\u02d0] 1 \\u02d0 ː [\u02d8-\u02da] 25 \\u02d8-\\u02da ˘˙˚ [\u02dc] 10 \\u02dc ˜ [\u037a] 1 \\u037a ͺ [\u037e] 4 \\u037e ; [\u038a] 3 \\u038a Ί [\u038c] 1 \\u038c Ό [\u03cb] 3 \\u03cb ϋ [\u03d6] 2 \\u03d6 ϖ [\u0384-\u0385] 8 \\u0384-\\u0385 ΄΅ [\u0387-\u0388] 2 \\u0387-\\u0388 ·Έ [\u038e-\u038f] 2 \\u038e-\\u038f ΎΏ [\u0391-\u03c9] 567276 \\u0391-\\u03c9 希腊 [\u0400-\u04ff] 2058 \\u0400-\\u04ff 西里尔 [\u0590-\u05ff] 34 \\u0590-\\u05ff 希伯来 [\u0652] 1 \\u0652 阿拉伯 [\u11bc] 3 \\u11bc 朝鲜 [\u1868] 1 \\u1868 ᡨ 蒙古 [\u1d31] 1 \\u1d31 ᴱ [\u1d52] 1 \\u1d52 ᵒ [\u1d5b] 1 \\u1d5b ᵛ [\u1ef7] 1 \\u1ef7 ỷ Latin ++ [\u2016-\u206a] 323353 \\u2016-\\u206a punc++ [\u2070] 4 \\u2070 ⁰ [\u2074-\u2075] 9 \\u2074-\\u2075 ⁴⁵ [\u2077-\u2078] 11 \\u2077-\\u2078 ⁷⁸ [\u2082-\u2084] 13 \\u2082-\\u2084 ₂₃₄ [\u20ac] 58 \\u20ac € [\u2103] 132218 \\u2103 ℃ [\u2105] 64 \\u2105 ℅ [\u2109] 45 \\u2109 ℉ [\u2116] 559 \\u2116 № [\u2122] 348 \\u2122 ™ [\u212b] 5 \\u212b Å [\u2160-\u216b] 235239 \\u2160-\\u216b ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ [\u2170-\u2179] 1557 \\u2170-\\u2179 ⅰⅱⅲⅳⅴⅵⅶⅷⅸ [\u21d2] 3 \\u21d2 ⇒ [\u2190-\u2193] 15107 \\u2190-\\u2193 ←↑→↓ [\u2206] 5 \\u2206 ∆ [\u2208] 281 \\u2208 ∈ [\u2211-\u2212] 839 \\u2211-\\u2212 ∑− [\u2217-\u221a] 75 \\u2217-\\u221a ∗∘∙√ [\u221d-\u2220] 861 \\u221d-\\u2220 ∝∞∟∠ [\u2223] 1 \\u2223 ∣ [\u2225] 80 \\u2225 ∥ [\u2227-\u222b] 226 \\u2227-\\u222b ∧∨∩∪∫ [\u222e] 8 \\u222e ∮ [\u2234] 46 \\u2234 ∴ [\u2237] 333 \\u2237 ∷ [\u223c-\u223d] 29 \\u223c-\\u223d ∼∽ [\u2245] 1 \\u2245 ≅ [\u224c] 33 \\u224c ≌ [\u2252] 4 \\u2252 ≒ [\u2260-\u2261] 555 \\u2260-\\u2261 ≠≡ [\u2264-\u2267] 31397 \\u2264-\\u2267 ≤≥≦≧ [\u226f] 3 \\u226f ≯ [\u2295] 4 \\u2295 ⊕ [\u2299] 17 \\u2299 ⊙ [\u22a5] 41 \\u22a5 ⊥ [\u22bf] 116 \\u22bf ⊿ [\u2312] 5 \\u2312 ⌒ [\u2395] 4 \\u2395 ⎕ [\u2460-\u2473] 48470 \\u2460-\\u2473 ①②③④⑤⑥⑦⑧⑨⑩ ⑳ [\u2474-\u2487] 1267 \\u2474-\\u2487 ⑴⑵⑶⑷⑸⑹⑺⑻⑼⑽ ⒇ [\u2488-\u249b] 107 \\u2488-\\u249b ⒈⒉⒊⒋⒌⒍⒎⒏⒐⒑ ⒛ [\u2500-\u257f] 566 \\u2500-\\u257f ─━│┃┄┅┆┇┈┉┊ [\u25a0-\u25a1] 1052 \\u25a0-\\u25a1 ■□ [\u25b2-\u25b4] 3695 \\u25b2-\\u25b4 ▲△▴ [\u25c6-\u25c7] 205 \\u25c6-\\u25c7 ◆◇ [\u25ca-\u25cb] 339 \\u25ca-\\u25cb ◊○ [\u25ce-\u25cf] 767 \\u25ce-\\u25cf ◎● [\u2605-\u2606] 196 \\u2605-\\u2606 ★☆ [\u2609] 3 \\u2609 ☉ [\u2610] 35 \\u2610 ☐ [\u2640] 1017 \\u2640 ♀ [\u2642] 1108 \\u2642 ♂ [\u2666] 2 \\u2666 ♦ [\u266a-\u266b] 9 \\u266a-\\u266b ♪♫ [\u2714] 4 \\u2714 ✔ [\u2717] 1 \\u2717 ✗ [\u274f] 1 \\u274f ❏ [\u2751] 2 \\u2751 ❑ [\u279f] 1 \\u279f ➟ [\u27a2] 6 \\u27a2 ➢ [\u27a5] 1 \\u27a5 ➥ [\u2a7d] 3 \\u2a7d ⩽ [\u2fd4] 2 \\u2fd4 ⿔ CJK++ [\u3001-\u301e] 7028921 \\u3001-\\u301e CJK punc [\u3022-\u3025] 8 \\u3022-\\u3025 〢〣〤〥 [\u3105-\u3107] 8 \\u3105-\\u3107 ㄅㄆ [\u310a] 1 \\u310a ㄊ [\u3111] 1 \\u3111 ㄑ [\u3113] 2 \\u3113 ㄓ [\u3116-\u3117] 6 \\u3116-\\u3117 ㄖㄗ [\u311a-\u311b] 2 \\u311a-\\u311b ㄚㄛ [\u3122] 1 \\u3122 ㄢ [\u3125] 1 \\u3125 ㄥ [\u3127-\u3128] 11 \\u3127-\\u3128 ㄧㄨ [\u3220-\u3229] 312 \\u3220-\\u3229 ㈠㈡㈢㈣㈤㈥㈦㈧㈨ [\u32a3] 6 \\u32a3 ㊣ [\u338e-\u338f] 125 \\u338e-\\u338f ㎎㎏ [\u339c-\u339d] 75 \\u339c-\\u339d ㎜㎝ [\u33a1] 59 \\u33a1 ㎡ [\u33a5] 1 \\u33a5 ㎥ [\u33d5] 24 \\u33d5 ㏕ [\u33d1-\u33d2] 9 \\u33d1-\\u33d2 ㏑㏒ 
[\u359e] 6 \\u359e 㖞 [\u39d1] 3 \\u39d1 㧑 [\u41f2] 13 \\u41f2 䇲 [\u4341] 2 \\u4341 䍁 [\u4d13] 2 \\u4d13 䴓 [\u4d15] 1 \\u4d15 䴕 [\u4e00-\u9fff] 13056199 \\u4e00-\\u9fff CJK [\uacf3] 2 \\uacf3 곳 朝鲜++ [\ucd38] 1 \\ucd38 촸 朝鲜++ [\ue20c-\ue2ff] 1305 \\ue20c-\\ue2ff ??? [\uf900-\ufaff] 136 \\uf900-\\ufaff CJK ++ [\ufb03] 1 \\ufb03 ffi [\ufe30-\ufe31] 941 \\ufe30-\\ufe31 ︰︱ [\ufe33] 2 \\ufe33 ︳ [\ufe38] 4 \\ufe38 ︸ [\ufe3c-\ufe3d] 33 \\ufe3c-\\ufe3d ︼︽ [\ufe3f-\ufe41] 19 \\ufe3f-\\ufe41 ︿﹀﹁ [\ufe4d-\ufe4e] 7 \\ufe4d-\\ufe4e ﹍﹎ [\ufe55-\ufe57] 102 \\ufe55-\\ufe57 ﹕﹖﹗ [\ufe59-\ufe5c] 185 \\ufe59-\\ufe5c ﹙﹚﹛ [\ufe5f] 10 \\ufe5f ﹟ [\ufe63] 70 \\ufe63 ﹣ [\ufe65-\ufe66] 551 \\ufe65-\\ufe66 ﹥﹦ [\ufe6a-\ufe6b] 233 \\ufe6a-\\ufe6b ﹪﹫ [\ufeff] 4 \\ufeff arabic ++ # FE70-FEFF [\uff01] 886 \\uff01 ! [\uff08-\uff09] 622070 \\uff08-\\uff09 () [\uff0c] 3445520 \\uff0c , [\uff1a] 471609 \\uff1a : [\uff1f] 9822 \\uff1f ? [\uff61] 2 \\uff61 。 [\uff63] 1 \\uff63 」 [\uff65] 8 \\uff65 ・ [\uff6c] 2 \\uff6c ャ [\uff72] 1 \\uff72 イ [\uff86] 1 \\uff86 ニ [\uff89] 1 \\uff89 ノ [\uffe0-\uffe1] 160 \\uffe0-\\uffe1 ¢£ [\uffe3] 7143 \\uffe3  ̄ [\uffe5] 57 \\uffe5 ¥ [\uffed] 9 \\uffed ■ [\ufffc] 1 \\ufffc  """ """ \\u0020-\\u007e Latin \\u00a0-\\u00ff Latin ++ \\u0100-\\u01ff Latin ++ \\u0251 ɑ \\u025b ɛ \\u0261 ɡ \\u028a ʊ \\u02c6-\\u02cb ˆˇˈˉˊˋ \\u02d0 ː \\u02d8-\\u02da ˘˙˚ \\u02dc ˜ \\u037a ͺ \\u037e ; \\u038a Ί \\u038c Ό \\u03cb ϋ \\u03d6 ϖ \\u0384-\\u0385 ΄΅ \\u0387-\\u0388 ·Έ \\u038e-\\u038f ΎΏ \\u0391-\\u03c9 希腊 \\u0400-\\u04ff 西里尔 \\u0590-\\u05ff 希伯来 \\u0652 阿拉伯 \\u11bc 朝鲜 \\u1868 ᡨ 蒙古 \\u1d31 ᴱ \\u1d52 ᵒ \\u1d5b ᵛ \\u1ef7 ỷ Latin ++ \\u2016-\\u206a punc++ \\u2070 ⁰ \\u2074-\\u2075 ⁴⁵ \\u2077-\\u2078 ⁷⁸ \\u2082-\\u2084 ₂₃₄ \\u20ac € \\u2103 ℃ \\u2105 ℅ \\u2109 ℉ \\u2116 № \\u2122 ™ \\u212b Å \\u2160-\\u216b ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ \\u2170-\\u2179 ⅰⅱⅲⅳⅴⅵⅶⅷⅸ \\u21d2 ⇒ \\u2190-\\u2193 ←↑→↓ \\u2206 ∆ \\u2208 ∈ \\u2211-\\u2212 ∑− \\u2217-\\u221a ∗∘∙√ \\u221d-\\u2220 ∝∞∟∠ \\u2223 ∣ \\u2225 ∥ \\u2227-\\u222b ∧∨∩∪∫ \\u222e ∮ \\u2234 ∴ \\u2237 ∷ \\u223c-\\u223d ∼∽ \\u2245 ≅ \\u224c ≌ \\u2252 ≒ \\u2260-\\u2261 ≠≡ \\u2264-\\u2267 ≤≥≦≧ \\u226f ≯ \\u2295 ⊕ \\u2299 ⊙ \\u22a5 ⊥ \\u22bf ⊿ \\u2312 ⌒ \\u2395 ⎕ \\u2460-\\u2473 ①②③④⑤⑥⑦⑧⑨⑩ ⑳ \\u2474-\\u2487 ⑴⑵⑶⑷⑸⑹⑺⑻⑼⑽ ⒇ \\u2488-\\u249b ⒈⒉⒊⒋⒌⒍⒎⒏⒐⒑ ⒛ \\u2500-\\u257f ─━│┃┄┅┆┇┈┉┊ \\u25a0-\\u25a1 ■□ \\u25b2-\\u25b4 ▲△▴ \\u25c6-\\u25c7 ◆◇ \\u25ca-\\u25cb ◊○ \\u25ce-\\u25cf ◎● \\u2605-\\u2606 ★☆ \\u2609 ☉ \\u2610 ☐ \\u2640 ♀ \\u2642 ♂ \\u2666 ♦ \\u266a-\\u266b ♪♫ \\u2714 ✔ \\u2717 ✗ \\u274f ❏ \\u2751 ❑ \\u279f ➟ \\u27a2 ➢ \\u27a5 ➥ \\u2a7d ⩽ \\u2fd4 ⿔ CJK++ \\u3001-\\u301e CJK punc \\u3022-\\u3025 〢〣〤〥 \\u3105-\\u3107 ㄅㄆ \\u310a ㄊ \\u3111 ㄑ \\u3113 ㄓ \\u3116-\\u3117 ㄖㄗ \\u311a-\\u311b ㄚㄛ \\u3122 ㄢ \\u3125 ㄥ \\u3127-\\u3128 ㄧㄨ \\u3220-\\u3229 ㈠㈡㈢㈣㈤㈥㈦㈧㈨ \\u32a3 ㊣ \\u338e-\\u338f ㎎㎏ \\u339c-\\u339d ㎜㎝ \\u33a1 ㎡ \\u33a5 ㎥ \\u33d5 ㏕ \\u33d1-\\u33d2 ㏑㏒ \\u359e 㖞 \\u39d1 㧑 \\u41f2 䇲 \\u4341 䍁 \\u4d13 䴓 \\u4d15 䴕 \\u4e00-\\u9fff CJK \\uacf3 곳 朝鲜++ \\ucd38 촸 朝鲜++ \\ue20c-\\ue2ff ??? \\uf900-\\ufaff CJK ++ \\ufb03 ffi \\ufe30-\\ufe31 ︰︱ \\ufe33 ︳ \\ufe38 ︸ \\ufe3c-\\ufe3d ︼︽ \\ufe3f-\\ufe41 ︿﹀﹁ \\ufe4d-\\ufe4e ﹍﹎ \\ufe55-\\ufe57 ﹕﹖﹗ \\ufe59-\\ufe5c ﹙﹚﹛ \\ufe5f ﹟ \\ufe63 ﹣ \\ufe65-\\ufe66 ﹥﹦ \\ufe6a-\\ufe6b ﹪﹫ \\ufeff arabic ++ # FE70-FEFF \\uff01 ! \\uff08-\\uff09 () \\uff0c , \\uff1a : \\uff1f ? \\uff61 。 \\uff63 」 \\uff65 ・ \\uff6c ャ \\uff72 イ \\uff86 ニ \\uff89 ノ \\uffe0-\\uffe1 ¢£ \\uffe3  ̄ \\uffe5 ¥ \\uffed ■ \\ufffc  """
2.625
3
challenges/largest_product_array/test_largest_product.py
jayadams011/data-structures-and-algorithms
0
11515
from largest_product import largest_product

import pytest


def test_product_returns():
    """Test that the return is a single product."""
    assert largest_product.largest([[2, 2]]) == 4


def test_returns_largest():
    """Test that the return is the largest product of a longer array."""
    assert largest_product.largest([[1, 3], [6, 10], [4, 5]]) == 60


def test_empty_list():
    """Test that a message is returned for an empty list."""
    assert largest_product.largest([]) == 'empty arr used'


def test_check_if_sub_has_only_1_el():
    """Test for a single value."""
    arr = [3]
    val = 0
    assert largest_product.node_inside(arr, val) == 3
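The module under test is not included in this record. A minimal sketch that satisfies the assertions above might look like the following; the names `largest`, `node_inside`, and the 'empty arr used' message come from the tests, everything else is an assumption:

# Hypothetical implementation sketch inferred from the tests above; not the
# repository's actual module.
def largest(arr):
    """Return the largest pairwise product found across all sub-arrays."""
    if not arr:
        return 'empty arr used'
    best = None
    for sub in arr:
        prod = node_inside(sub, best)
        if best is None or prod > best:
            best = prod
    return best


def node_inside(sub, val):
    """Product of the two largest values in sub (a lone value is returned as-is).

    `val` is unused; it is kept only to match the signature the tests call.
    """
    if len(sub) == 1:
        return sub[0]
    ordered = sorted(sub, reverse=True)
    return ordered[0] * ordered[1]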
2.875
3
examples/transformer/model.py
namin/dm-haiku
0
11516
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer model components."""

from typing import Optional

import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np


class CausalSelfAttention(hk.MultiHeadAttention):
  """Self attention with a causal mask applied."""

  def __call__(
      self,
      query: jnp.ndarray,
      key: Optional[jnp.ndarray] = None,
      value: Optional[jnp.ndarray] = None,
      mask: Optional[jnp.ndarray] = None,
  ) -> jnp.ndarray:
    key = key if key is not None else query
    value = value if value is not None else query

    if query.ndim != 3:
      raise ValueError('Expect queries of shape [B, T, D].')

    seq_len = query.shape[1]
    causal_mask = np.tril(np.ones((seq_len, seq_len)))
    mask = mask * causal_mask if mask is not None else causal_mask

    return super().__call__(query, key, value, mask)


class DenseBlock(hk.Module):
  """A 2-layer MLP which widens then narrows the input."""

  def __init__(self,
               init_scale: float,
               widening_factor: int = 4,
               name: Optional[str] = None):
    super().__init__(name=name)
    self._init_scale = init_scale
    self._widening_factor = widening_factor

  def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
    hiddens = x.shape[-1]
    initializer = hk.initializers.VarianceScaling(self._init_scale)
    x = hk.Linear(self._widening_factor * hiddens, w_init=initializer)(x)
    x = jax.nn.gelu(x)
    return hk.Linear(hiddens, w_init=initializer)(x)


class Transformer(hk.Module):
  """A transformer stack."""

  def __init__(self,
               num_heads: int,
               num_layers: int,
               dropout_rate: float,
               name: Optional[str] = None):
    super().__init__(name=name)
    self._num_layers = num_layers
    self._num_heads = num_heads
    self._dropout_rate = dropout_rate

  def __call__(self,
               h: jnp.ndarray,
               mask: Optional[jnp.ndarray],
               is_training: bool) -> jnp.ndarray:
    """Connects the transformer.

    Args:
      h: Inputs, [B, T, D].
      mask: Padding mask, [B, T].
      is_training: Whether we're training or not.

    Returns:
      Array of shape [B, T, D].
    """
    init_scale = 2. / self._num_layers
    dropout_rate = self._dropout_rate if is_training else 0.
    if mask is not None:
      mask = mask[:, None, None, :]

    # Note: names chosen to approximately match those used in the GPT-2 code;
    # see https://github.com/openai/gpt-2/blob/master/src/model.py.
    for i in range(self._num_layers):
      h_norm = layer_norm(h, name=f'h{i}_ln_1')
      h_attn = CausalSelfAttention(
          num_heads=self._num_heads,
          key_size=32,
          w_init_scale=init_scale,
          name=f'h{i}_attn')(h_norm, mask=mask)
      h_attn = hk.dropout(hk.next_rng_key(), dropout_rate, h_attn)
      h = h + h_attn
      h_norm = layer_norm(h, name=f'h{i}_ln_2')
      h_dense = DenseBlock(init_scale, name=f'h{i}_mlp')(h_norm)
      h_dense = hk.dropout(hk.next_rng_key(), dropout_rate, h_dense)
      h = h + h_dense
    h = layer_norm(h, name='ln_f')

    return h


def layer_norm(x: jnp.ndarray, name: Optional[str] = None) -> jnp.ndarray:
  """Apply a unique LayerNorm to x with default settings."""
  return hk.LayerNorm(axis=-1,
                      create_scale=True,
                      create_offset=True,
                      name=name)(x)
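A minimal usage sketch, not part of the original file: the function name `forward_fn` and the toy shapes are illustrative assumptions; the transform/init/apply pattern is standard Haiku. Because the module calls hk.next_rng_key() for dropout, apply() also needs an rng key.

# Illustrative usage sketch (assumed, not from the original file).
import haiku as hk
import jax
import jax.numpy as jnp

def forward_fn(h, mask, is_training):
    return Transformer(num_heads=4, num_layers=2, dropout_rate=0.1)(
        h, mask, is_training)

forward = hk.transform(forward_fn)
rng = jax.random.PRNGKey(42)
h = jnp.zeros([8, 16, 128])   # [B, T, D] token embeddings
mask = jnp.ones([8, 16])      # [B, T] padding mask
params = forward.init(rng, h, mask, True)
out = forward.apply(params, rng, h, mask, True)  # shape [8, 16, 128]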
2.265625
2
face.py
shwang95/Intelligence-Surveillance-System
1
11517
#!/usr/bin/env python import boto3 import cv2 import numpy import os import base64 import gspread from email.mime.base import MIMEBase from email.mime.image import MIMEImage from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from httplib2 import Http from time import localtime, strftime, time, sleep from oauth2client.service_account import ServiceAccountCredentials from apiclient import discovery, errors from apiclient.discovery import build from oauth2client import client from oauth2client import tools from oauth2client import file def compare_faces( bucket, key, bucket_target, key_target, threshold=80, region='us-east-1'): ''' Require for face comparision ''' rekognition = boto3.client('rekognition', region) response = rekognition.compare_faces( SourceImage={ 'S3Object': { 'Bucket': bucket, 'Name': key, } }, TargetImage={ 'S3Object': { 'Bucket': bucket_target, 'Name': key_target, } }, SimilarityThreshold=threshold, ) return response['SourceImageFace'], response['FaceMatches'] def upload_log(text): ''' Upload the Alert time to the google drive sheet ''' scope = ['https://spreadsheets.google.com/feeds'] credentials = ServiceAccountCredentials.from_json_keyfile_name( 'ProjectLog-41cafcffcf13.json', scope) gc = gspread.authorize(credentials) wks = gc.open('ISeeU_Log').sheet1 wks.append_row([text]) def send(service, user_id, message): ''' Send the mime email package ''' try: message = ( service.users().messages().send( userId=user_id, body=message).execute()) print('Message Id: %s' % message['id']) return message except errors.HttpError as error: print('An error occurred: %s' % error) def create_email(sender, to, subject, message_text, pic): ''' Create the email Included information: Sender, Receiver, Subject, Text, Attached Image ''' message = MIMEMultipart() message['to'] = to message['from'] = sender message['Subject'] = subject msg = MIMEText(message_text) message.attach(msg) fp = open(pic, 'rb') msg = MIMEImage(fp.read(), _subtype='jpeg') fp.close() imagename = os.path.basename(pic) msg.add_header('Content-Disposition', 'attachment', filename=imagename) message.attach(msg) return {'raw': base64.urlsafe_b64encode(message.as_string())} def authenticate(): ''' Using oauth2 to get the credentials. It will give all permission related to gmail. client_secret.json is the secret key you get from google. Reference: Gmail API python quickstart ''' SCOPES = 'https://mail.google.com' store = file.Storage('credentials.json') creds = store.get() if not creds or creds.invalid: flow = client.flow_from_clientsecrets('client_secret.json', SCOPES) creds = tools.run_flow(flow, store) service = discovery.build('gmail', 'v1', http=creds.authorize(Http())) return service def stranger_detected(pic): ''' Recore the date time and make them as the code for the user the trigger alarm ''' nowtime = strftime("%Y-%m-%d %H:%M:%S", localtime()) trigcode = strftime("%d%H%M%S", localtime()) # Upload log to Google drive text = 'Stranger show up at ' + nowtime upload_log(text) # Information of email # pic = 'guldan.jpg' # Attached Image sender = "<EMAIL>" to = "<EMAIL>" # User email address subject = "Alert from ISeeU!" text = text + '\nReply ' + trigcode + ' to trigger the alarm.' 
    # Sending email to user
    service = authenticate()
    message = create_email(sender, to, subject, text, pic)
    send(service, 'me', message)
    return service, subject, trigcode


def main():
    while True:
        print('No face detected...')
        if os.path.isfile('face.jpg'):
            print('Face found!')
            bucket_name = 'ec500j1-project-iseeu'
            source_name = ['sh.jpg']  # User input faces
            target_name = 'face.jpg'  # Temporary image
            s3 = boto3.client('s3')
            # Upload images to s3 server
            for img in source_name:
                s3.upload_file(img, bucket_name, img)
            s3.upload_file(target_name, bucket_name, target_name)
            while True:
                try:
                    # Check if the images are successfully uploaded
                    for img in source_name:
                        boto3.resource('s3').Object(bucket_name, img).load()
                    boto3.resource('s3').Object(
                        bucket_name, target_name).load()
                except BaseException:
                    continue
                break
            sources, matches = {}, {}
            for img in source_name:
                try:
                    sources[img], matches[img] = compare_faces(
                        bucket_name, img, bucket_name, target_name)
                except Exception as e:  # If Rekognition failure
                    print('Rekognition error: ' + str(e))
                    os.remove('face.jpg')
                    continue  # no comparison result for this image
                if len(matches[img]) == 0:
                    # Send notification email
                    service, target, trigcode = stranger_detected(
                        'face.jpg')
                    user_id = 'me'
                    flag = False  # Flag for trigger alert
                    st = time()
                    while time() - st < 120:  # Listen for 2 minutes
                        '''
                        Check all the email for user's reply every 30 seconds.
                        If the subject matches, check if the trigcode matches.
                        If the trigcode matches too, return True to set off
                        the alarm.
                        '''
                        threads = service.users().threads().list(
                            userId=user_id).execute().get('threads', [])
                        for thread in threads:
                            tdata = service.users().threads().get(
                                userId=user_id, id=thread['id']).execute()
                            nmsgs = len(tdata['messages'])
                            msg = tdata['messages'][0]['payload']
                            subject = ''
                            for header in msg['headers']:
                                if header['name'] == 'Subject':
                                    subject = header['value']
                                    break
                            if subject == target:
                                if thread[u'snippet'][0:8] == trigcode:
                                    # If user replies with trigcode
                                    flag = True
                                    break
                        if flag:  # If user replies with trigcode
                            break
                        nt = strftime('%Y-%m-%d %H:%M:%S', localtime())
                        print('Still listening: ' + nt)
                        sleep(30)
                    print('Alert!')  # Emulated alert
                else:
                    print('Not a stranger')  # Do nothing
            # Delete all images from s3 server
            for img in source_name:
                s3.delete_object(Bucket=bucket_name, Key=img)
            s3.delete_object(Bucket=bucket_name, Key=target_name)
            os.remove('face.jpg')  # Delete temporary image
        sleep(10)


if __name__ == '__main__':
    main()
2.203125
2
unpythonic/syntax/autoref.py
aisha-w/unpythonic
0
11518
# -*- coding: utf-8 -*- """Implicitly reference attributes of an object.""" from ast import Name, Assign, Load, Call, Lambda, With, Str, arg, \ Attribute, Subscript, Store, Del from macropy.core.quotes import macros, q, u, name, ast_literal from macropy.core.hquotes import macros, hq from macropy.core.walkers import Walker from .util import wrapwith, AutorefMarker from .letdoutil import isdo, islet, ExpandedDoView, ExpandedLetView from ..dynassign import dyn from ..lazyutil import force1, mark_lazy # with autoref(o): # with autoref(scipy.loadmat("mydata.mat")): # evaluate once, assign to a gensym # with autoref(scipy.loadmat("mydata.mat")) as o: # evaluate once, assign to given name # # We need something like:: # # with autoref(o): # x # --> (o.x if hasattr(o, "x") else x) # x.a # --> (o.x.a if hasattr(o, "x") else x.a) # x[s] # --> (o.x[s] if hasattr(o, "x") else x[s]) # o # --> o # with autoref(p): # x # --> (p.x if hasattr(p, "x") else (o.x if hasattr(o, "x") else x)) # x.a # --> (p.x.a if hasattr(p, "x") else (o.x.a if hasattr(o, "x") else x.a)) # x[s] # --> (p.x[s] if hasattr(p, "x") else (o.x[s] if hasattr(o, "x") else x[s])) # o # --> (p.o if hasattr(p, "o") else o) # o.x # --> (p.o.x if hasattr(p, "o") else o.x) # o[s] # --> (p.o[s] if hasattr(p, "o") else o[s]) # # One possible clean-ish implementation is:: # # with AutorefMarker("o"): # no-op at runtime # x # --> (lambda _ar271: _ar271[1] if _ar271[0] else x)(_autoref_resolve((o, "x"))) # x.a # --> ((lambda _ar271: _ar271[1] if _ar271[0] else x)(_autoref_resolve((o, "x")))).a # x[s] # --> ((lambda _ar271: _ar271[1] if _ar271[0] else x)(_autoref_resolve((o, "x"))))[s] # o # --> o (can only occur if an asname is supplied) # with AutorefMarker("p"): # x # --> (lambda _ar314: _ar314[1] if _ar314[0] else x)(_autoref_resolve((p, o, "x"))) # x.a # --> ((lambda _ar314: _ar314[1] if _ar314[0] else x)(_autoref_resolve((p, o, "x"))).a # x[s] # --> ((lambda _ar314: _ar314[1] if _ar314[0] else x)(_autoref_resolve((p, o, "x")))[s] # # when the inner autoref expands, it doesn't know about the outer one, so we will get this: # o # --> (lambda _ar314: _ar314[1] if _ar314[0] else o)(_autoref_resolve((p, "o"))) # o.x # --> ((lambda _ar314: _ar314[1] if _ar314[0] else o)(_autoref_resolve((p, "o")))).x # o[s] # --> ((lambda _ar314: _ar314[1] if _ar314[0] else o)(_autoref_resolve((p, "o"))))[s] # # the outer autoref needs the marker to know to skip this (instead of looking up o.p): # p # --> p # # The lambda is needed, because the lexical-variable lookup for ``x`` must occur at the use site, # and it can only be performed by Python itself. We could modify ``_autoref_resolve`` to take # ``locals()`` and ``globals()`` as arguments and look also in the ``builtins`` module, # but that way we get no access to the enclosing scopes (the "E" in LEGB). # # Recall the blocks expand from inside out. # # We must leave an AST marker in place of the each autoref block, so that any outer autoref block (when it expands) # understands that within that block, any read access to the name "p" is to be left alone. # # In ``_autoref_resolve``, we use a single args parameter to avoid dealing with ``*args`` # when analyzing the Call node, thus avoiding much special-case code for the AST differences # between Python 3.4 and 3.5+. # # In reality, we also capture-and-assign the autoref'd expr into a gensym'd variable (instead of referring # to ``o`` and ``p`` directly), so that arbitrary expressions can be autoref'd without giving them # a name in user code. 
@mark_lazy def _autoref_resolve(args): *objs, s = [force1(x) for x in args] for o in objs: if hasattr(o, s): return True, force1(getattr(o, s)) return False, None def autoref(block_body, args, asname): assert len(args) == 1, "expected exactly one argument, the expr to implicitly reference" assert block_body, "expected at least one statement inside the 'with autoref' block" gen_sym = dyn.gen_sym o = asname.id if asname else gen_sym("_o") # Python itself guarantees asname to be a bare Name. # with AutorefMarker("_o42"): def isexpandedautorefblock(tree): if not (type(tree) is With and len(tree.items) == 1): return False ctxmanager = tree.items[0].context_expr return type(ctxmanager) is Call and \ type(ctxmanager.func) is Name and ctxmanager.func.id == "AutorefMarker" and \ len(ctxmanager.args) == 1 and type(ctxmanager.args[0]) is Str def getreferent(tree): return tree.items[0].context_expr.args[0].s # (lambda _ar314: _ar314[1] if _ar314[0] else x)(_autoref_resolve((p, o, "x"))) def isautoreference(tree): return type(tree) is Call and \ len(tree.args) == 1 and type(tree.args[0]) is Call and \ type(tree.args[0].func) is Name and tree.args[0].func.id == "_autoref_resolve" and \ type(tree.func) is Lambda and len(tree.func.args.args) == 1 and \ tree.func.args.args[0].arg.startswith("_ar") def get_resolver_list(tree): # (p, o, "x") return tree.args[0].args[0].elts def add_to_resolver_list(tree, objnode): lst = get_resolver_list(tree) lst.insert(-1, objnode) # x --> the autoref code above. def makeautoreference(tree): assert type(tree) is Name and (type(tree.ctx) is Load or not tree.ctx) newtree = hq[(lambda __ar_: __ar_[1] if __ar_[0] else ast_literal[tree])(_autoref_resolve((name[o], u[tree.id])))] our_lambda_argname = gen_sym("_ar") @Walker def renametmp(tree, **kw): if type(tree) is Name and tree.id == "__ar_": tree.id = our_lambda_argname elif type(tree) is arg and tree.arg == "__ar_": tree.arg = our_lambda_argname return tree return renametmp.recurse(newtree) @Walker def transform(tree, *, referents, set_ctx, stop, **kw): if type(tree) in (Attribute, Subscript, Name) and type(tree.ctx) in (Store, Del): stop() # skip autoref lookup for let/do envs elif islet(tree): view = ExpandedLetView(tree) set_ctx(referents=referents + [view.body.args.args[0].arg]) # lambda e14: ... elif isdo(tree): view = ExpandedDoView(tree) set_ctx(referents=referents + [view.body[0].args.args[0].arg]) # lambda e14: ... elif isexpandedautorefblock(tree): set_ctx(referents=referents + [getreferent(tree)]) elif isautoreference(tree): # generated by an inner already expanded autoref block stop() thename = get_resolver_list(tree)[-1].s if thename in referents: # remove autoref lookup for an outer referent, inserted early by an inner autoref block # (that doesn't know that any outer block exists) tree = q[name[thename]] # (lambda ...)(_autoref_resolve((p, "o"))) --> o else: add_to_resolver_list(tree, q[name[o]]) # _autoref_resolve((p, "x")) --> _autoref_resolve((p, o, "x")) elif type(tree) is Call and type(tree.func) is Name and tree.func.id == "AutorefMarker": # nested autorefs stop() elif type(tree) is Name and (type(tree.ctx) is Load or not tree.ctx) and tree.id not in referents: stop() tree = makeautoreference(tree) # Attribute works as-is, because a.b.c --> Attribute(Attribute(a, "b"), "c"), so Name "a" gets transformed. # Subscript similarly, a[1][2] --> Subscript(Subscript(a, 1), 2), so Name "a" gets transformed. 
return tree # skip (by name) some common references inserted by other macros always_skip = ['letter', 'dof', 'namelambda', 'curry', 'currycall', 'lazy', 'lazyrec', 'lazycall'] newbody = [Assign(targets=[q[name[o]]], value=args[0])] for stmt in block_body: newbody.append(transform.recurse(stmt, referents=always_skip + [o])) return wrapwith(item=hq[AutorefMarker(u[o])], body=newbody, locref=block_body[0])
2.390625
2
doit/exceptions.py
m4ta1l/doit
1,390
11519
<reponame>m4ta1l/doit
"""Handle exceptions generated from 'user' code"""

import sys
import traceback


class InvalidCommand(Exception):
    """Invalid command line argument."""
    def __init__(self, *args, **kwargs):
        self.not_found = kwargs.pop('not_found', None)
        super(InvalidCommand, self).__init__(*args, **kwargs)
        self.cmd_used = None
        self.bin_name = 'doit'  # default but might be overwritten

    def __str__(self):
        if self.not_found is None:
            return super(InvalidCommand, self).__str__()

        if self.cmd_used:
            msg_task_not_found = (
                'command `{cmd_used}` invalid parameter: "{not_found}".' +
                ' Must be a task, or a target.\n' +
                'Type "{bin_name} list" to see available tasks')
            return msg_task_not_found.format(**self.__dict__)
        else:
            msg_cmd_task_not_found = (
                'Invalid parameter: "{not_found}".' +
                ' Must be a command, task, or a target.\n' +
                'Type "{bin_name} help" to see available commands.\n' +
                'Type "{bin_name} list" to see available tasks.\n')
            return msg_cmd_task_not_found.format(**self.__dict__)


class InvalidDodoFile(Exception):
    """Invalid dodo file"""
    pass


class InvalidTask(Exception):
    """Invalid task instance. User error on specifying the task."""
    pass


class CatchedException(object):
    """Base class used to save info from caught exceptions.

    The traceback from the original exception is saved.
    """
    def __init__(self, msg, exception=None):
        self.message = msg
        self.traceback = ''

        if isinstance(exception, CatchedException):
            self.traceback = exception.traceback
        elif exception is not None:
            # TODO remove doit-code part from traceback
            self.traceback = traceback.format_exception(
                exception.__class__, exception, sys.exc_info()[2])

    def get_msg(self):
        """return full exception description (includes traceback)"""
        return "%s\n%s" % (self.message, "".join(self.traceback))

    def get_name(self):
        """get Exception name"""
        return self.__class__.__name__

    def __repr__(self):
        return "(<%s> %s)" % (self.get_name(), self.message)

    def __str__(self):
        return "%s\n%s" % (self.get_name(), self.get_msg())


class TaskFailed(CatchedException):
    """Task execution was not successful."""
    pass


class UnmetDependency(TaskFailed):
    """Task was not executed because a dependent task failed or is ignored"""
    pass


class TaskError(CatchedException):
    """Error while trying to execute task."""
    pass


class SetupError(CatchedException):
    """Error while trying to execute setup object"""
    pass


class DependencyError(CatchedException):
    """Error while trying to check if task is up-to-date or saving task status"""
    pass
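A short illustration of how the two branches of InvalidCommand.__str__ behave; this usage example is my addition, not part of the doit source:

# Illustrative usage (assumed): the formatted message changes once
# `cmd_used` is set on the exception instance.
err = InvalidCommand(not_found='build')
print(err)           # 'Invalid parameter: "build". ... Type "doit help" ...'
err.cmd_used = 'run'
print(err)           # 'command `run` invalid parameter: "build". ...'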
3.0625
3
python/redmonster/tools/plot_fits.py
timahutchinson/redmonster
5
11520
<reponame>timahutchinson/redmonster # GUI used for quickly plotting BOSS spectra. Also allows overplotting of best-fit template as # determined by redmonster pipeline. Sort of a redmonster version of plotspec.pro, though currently # with less bells and whistles. # # <NAME>, University of Utah, April 2014 # Signifcantly updated by TH, October 2014 # # <EMAIL> from os import environ from os.path import join, exists try: from tkinter import * except ImportError: from Tkinter import * import numpy as n import matplotlib matplotlib.use('Agg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, \ NavigationToolbar2TkAgg from matplotlib.figure import Figure from astropy.io import fits from astropy.convolution import convolve, Box1DKernel import seaborn as sns sns.set_style('whitegrid') from redmonster.physics.misc import poly_array class PlotFit(Frame): def __init__ (self): self.root = Tk() self.ablinelist = [ 3890.2, 3933.7, 3968.5, 4102.9, 4307, 4341.7, 4862.7, 5175, 5889, 5895 ] self.ablinenames = [ r'H$\epsilon$','Ca K', 'Ca H', r'H$\delta$', 'Ca G', r'H$\gamma$', r'H$\beta$', 'Mg I', 'Na I', 'Na I' ] self.emlinelist = [2500] self.emlinenames = ['test emline'] self.plate = None self.mjd = None # plate = StringVar() plate.set('7848') mjd = StringVar() mjd.set('56959') # L1 = Label(self.root, text='Plate') L1.grid(sticky=E) L2 = Label(self.root, text='MJD') L2.grid(sticky=E) L3 = Label(self.root, text='Fiber') L3.grid(stick=E) L5 = Label(self.root, text='z num') L5.grid(stick=E) self.e1 = Entry(self.root, textvariable=plate) self.e1.bind() self.e1.grid(row=0, column=1) self.e2 = Entry(self.root, textvariable=mjd) self.e2.grid(row=1, column=1) fiber = StringVar() fiber.set('0') self.e3 = Entry(self.root, textvariable=fiber) self.e3.grid(row=2, column=1) znum = StringVar() znum.set('1') self.e5 = Entry(self.root, textvariable=znum) self.e5.grid(row=3, column=1) nextz = Button(self.root, text='+', command=self.next_z) nextz.grid(row=3, column=4) prevz = Button(self.root, text='-', command=self.prev_z) prevz.grid(row=3, column=3) self.var = BooleanVar() self.var.set(1) self.restframe = BooleanVar() self.restframe.set(0) self.ablines = BooleanVar() self.ablines.set(0) self.emlines = BooleanVar() self.emlines.set(0) c = Checkbutton(self.root, text='Overplot best-fit model', variable=self.var) c.grid(row=4, column=1) restframe = Checkbutton(self.root, text='Rest-frame wavelength', variable=self.restframe) restframe.grid(row=5,column=1) ablines = Checkbutton(self.root, text='Show absorption lines ', variable=self.ablines) ablines.grid(row=6, column=1) emlines = Checkbutton(self.root, text='Show emission lines ', variable=self.emlines) emlines.grid(row=7, column=1) # smooth = StringVar() smooth.set('5') L4 = Label(self.root, text='Smooth') L4.grid(sticky=E) self.e4 = Entry(self.root, textvariable=smooth) self.e4.grid(row=8, column=1) plot = Button(self.root, text='Plot', command=self.do_plot) plot.grid(row=9, column=1) qbutton = Button(self.root, text='QUIT', fg='red', command=self.root.destroy) qbutton.grid(row=10, column=1) nextfiber = Button(self.root, text='>', command=self.next_fiber) nextfiber.grid(row=2, column=4) prevfiber = Button(self.root, text='<', command=self.prev_fiber) prevfiber.grid(row=2, column=3) Frame.__init__(self,self.root) self.root.mainloop() def do_plot(self): if self.plate != int(self.e1.get()) or self.mjd != int(self.e2.get()): self.plate = int(self.e1.get()) self.mjd = int(self.e2.get()) self.fiber = int(self.e3.get()) self.znum = int(self.e5.get()) 
self.platepath = join(environ['BOSS_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, 'spPlate-%s-%s.fits' % (self.plate, self.mjd)) hdu = fits.open(self.platepath) self.specs = hdu[0].data self.wave = 10**(hdu[0].header['COEFF0'] + n.arange(hdu[0].header['NAXIS1']) * hdu[0].header['COEFF1']) self.models = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[2].data self.fiberid = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.FIBERID self.type1 = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.CLASS1 self.type2 = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.CLASS2 self.type3 = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.CLASS3 self.type4 = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.CLASS4 self.type5 = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.CLASS5 self.z = n.zeros((self.fiberid.shape[0],5)) self.z[:,0] = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.Z1 self.z[:,1] = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.Z2 self.z[:,2] = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.Z3 self.z[:,3] = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.Z4 self.z[:,4] = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.Z5 self.zwarning = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], '%s' % self.plate, environ['RUN1D'], 'redmonster-%s-%s.fits' % (self.plate, self.mjd)))[1].data.ZWARNING else: self.fiber = int(self.e3.get()) f = Figure(figsize=(10,6), dpi=100) a = f.add_subplot(111) loc = n.where(self.fiberid == self.fiber)[0] if self.znum == 1: z = self.z[loc[0],0] thistype = self.type1[loc[0]] elif self.znum == 2: z = self.z[loc[0],1] thistype = self.type2[loc[0]] elif self.znum == 3: z = self.z[loc[0],2] thistype = self.type3[loc[0]] elif self.znum == 4: z = self.z[loc[0],3] thistype = self.type4[loc[0]] elif self.znum == 5: z = self.z[loc[0],4] thistype = self.type5[loc[0]] if self.var.get() == 0: if self.restframe.get() == 0: a.plot(self.wave, self.specs[self.fiber], color='black') elif self.restframe.get() == 1: a.plot(self.wave/(1+self.z[loc][0]), self.specs[self.fiber], color='black') elif self.var.get() == 1: smooth = self.e4.get() if smooth is '': if 
self.restframe.get() == 0: a.plot(self.wave, self.specs[self.fiber], color='black') elif self.restframe.get() == 1: a.plot(self.wave/(1+z), self.specs[self.fiber], color='black') else: if self.restframe.get() == 0: a.plot(self.wave, convolve(self.specs[self.fiber], Box1DKernel(int(smooth))), color='black') elif self.restframe.get() == 1: a.plot(self.wave/(1+z), convolve(self.specs[self.fiber], Box1DKernel(int(smooth))), color='black') # Overplot model if len(loc) is not 0: if self.restframe.get() == 0: #a.plot(self.wave, self.models[loc[0]], color='black') # This for when multiple models are in redmonster file a.plot(self.wave, self.models[loc[0],self.znum-1], color='cyan') if self.ablines.get() == 1: for i, line in enumerate(self.ablinelist): if ((line*(1+z) > self.wave[0]) & (line*(1+z) < self.wave[-1])): a.axvline(line*(1+z), color='blue', linestyle='--', label=self.ablinenames[i]) if self.emlines.get() == 1: for i, line in enumerate(self.emlinelist): if (line*(1+z) > self.wave[0]) & (line*(1+z) < \ self.wave[-1]): a.axvline(line*(1+z), color='red', linestyle='--', label=self.emlinenames[i]) if self.ablines.get() == 1 or self.emlines.get() == 1: a.legend(prop={'size':10}) elif self.restframe.get() == 1: a.plot(self.wave/(1+z), self.models[loc[0],self.znum-1], color='cyan') if self.ablines.get() == 1: for i, line in enumerate(self.ablinelist): if (line > self.wave[0]) & (line < self.wave[-1]): a.axvline(line, color='blue', linestyle='--', label=self.ablinenames[i]) if self.emlines.get() == 1: for i, line in enumerate(self.emlinelist): if (line > self.wave[0]) & (line < self.wave[-1]): a.axvline(line, color='red', linestyle='--', label=self.emlinenames[i]) if self.ablines.get() == 1 or self.emlines.get() == 1: a.legend(prop={'size':10}) a.set_title('Plate %s Fiber %s: z=%s class=%s zwarning=%s' % (self.plate, self.fiber, z, thistype, self.zwarning[loc[0]])) else: print('Fiber %s is not in redmonster-%s-%s.fits' % \ (self.fiber, self.plate, self.mjd)) a.set_title('Plate %s Fiber %s' % (self.plate, self.fiber)) if self.restframe.get() == 1: lower_data, upper_data = self.set_limits() a.axis([self.wave[0]/(1+z)-100,self.wave[-1]/(1+z)+100, lower_data,upper_data]) elif self.restframe.get() == 0: lower_data, upper_data = self.set_limits() a.axis([self.wave[0]-100,self.wave[-1]+100,lower_data,upper_data]) a.set_xlabel('Wavelength ($\AA$)') a.set_ylabel('Flux ($10^{-17} erg\ cm^2 s^{-1} \AA^{-1}$)') canvas = FigureCanvasTkAgg(f, master=self.root) canvas.get_tk_widget().grid(row=0, column=5, rowspan=20) toolbar_frame = Frame(self.root) toolbar_frame.grid(row=20,column=5) toolbar = NavigationToolbar2TkAgg( canvas, toolbar_frame ) canvas.show() def next_fiber(self): self.fiber += 1 self.e3.delete(0, END) self.e3.insert(0, str(self.fiber)) self.do_plot() def prev_fiber(self): self.fiber -= 1 self.e3.delete(0, END) self.e3.insert(0, str(self.fiber)) self.do_plot() def next_z(self): if (self.znum >= 1) & (self.znum < 5): self.znum += 1 self.e5.delete(0, END) self.e5.insert(0, str(self.znum)) self.do_plot() else: if self.znum < 1: self.znum = 1 self.e5.delete(0, END) self.e5.insert(0, str(self.znum)) self.do_plot() elif self.znum >= 5: self.znum = 5 self.e5.delete(0, END) self.e5.insert(0, str(self.znum)) self.do_plot() else: self.znum = 1 self.e5.delete(0, END) self.e5.insert(0, str(self.znum)) self.do_plot() def prev_z(self): if (self.znum > 1) & (self.znum <= 5): self.znum -= 1 self.e5.delete(0, END) self.e5.insert(0, str(self.znum)) self.do_plot() else: if self.znum <= 1: self.znum = 1 
self.e5.delete(0, END) self.e5.insert(0, str(self.znum)) self.do_plot() elif self.znum > 5: self.znum = 5 self.e5.delete(0, END) self.e5.insert(0, str(self.znum)) self.do_plot() else: self.znum = 1 self.e5.delete(0, END) self.e5.insert(0, str(self.znum)) self.do_plot() def set_limits(self, percentile=.95): sorted_flux = n.sort( self.specs[self.fiber] ) bottom_ind = int(n.floor((1-percentile)/2. * sorted_flux.shape[0])) top_ind = n.ceil(sorted_flux.shape[0] - bottom_ind) return sorted_flux[bottom_ind], sorted_flux[top_ind] app = PlotFit()
2.1875
2
QueueReconstruction.py
yashpatel0369/PythonDataStructures
0
11521
<filename>QueueReconstruction.py
# An algorithm to reconstruct the queue.
# Suppose you have a random list of people standing in a queue.
# Each person is described by a pair of integers (h, k), where h is the height
# of the person and k is the number of people in front of this person who have
# a height greater than or equal to h.
from typing import List


class Solution:
    def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:
        # Sort by height descending, then by k ascending; inserting each
        # person at index k keeps every earlier (taller) placement valid,
        # because later insertions are all strictly shorter.
        people = sorted(people, key=lambda x: (-x[0], x[1]))
        ans = []
        for pep in people:
            ans.insert(pep[1], pep)
        return ans
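A quick check with the classic example input for this problem; the example run is my addition, not part of the original file:

# Example run (added for illustration):
people = [[7, 0], [4, 4], [7, 1], [5, 0], [6, 1], [5, 2]]
print(Solution().reconstructQueue(people))
# -> [[5, 0], [7, 0], [5, 2], [6, 1], [4, 4], [7, 1]]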
4
4
passagens/models/classe_viagem.py
carlosrjhoe/Aplicacao_Formulario_com_Django
0
11522
<reponame>carlosrjhoe/Aplicacao_Formulario_com_Django<gh_stars>0
from django.db import models
from django.utils.translation import gettext_lazy as _


class ClasseViagem(models.TextChoices):
    ECONOMICA = 'ECO', _('Econômica')
    EXECUTIVA = 'EXE', _('Executiva')
    PRIMEIRA_CLASSE = 'PRI', _('Primeira')
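A sketch of how a TextChoices enum like this is typically attached to a model field; the `Passagem` model below is hypothetical, not part of this file:

# Illustrative use on a model field (hypothetical model, my addition):
class Passagem(models.Model):
    classe_viagem = models.CharField(
        max_length=3,
        choices=ClasseViagem.choices,
        default=ClasseViagem.ECONOMICA,
    )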
1.960938
2
tests/unit/test_HashlistsByAlgLoaderThread.py
AntonKuzminRussia/hbs-cli
5
11523
# -*- coding: utf-8 -*- """ This is part of HashBruteStation software Docs EN: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station_en Docs RU: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station License: MIT Copyright (c) <NAME> <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en) Integration tests for HashlistsByAlgLoaderThread """ import sys import os import time import pytest sys.path.append('../../') from libs.common import file_get_contents, md5 from classes.HashlistsByAlgLoaderThread import HashlistsByAlgLoaderThread from CommonUnit import CommonUnit class Test_HashlistsByAlgLoaderThread(CommonUnit): """ Unit tests for HashlistsByAlgLoaderThread """ db = None thrd = None def setup(self): """ Tests setup """ self._clean_db() self.thrd = HashlistsByAlgLoaderThread() self.thrd.catch_exceptions = False def teardown(self): """ Tests teardown """ if isinstance(self.thrd, HashlistsByAlgLoaderThread): self.thrd.available = False time.sleep(1) del self.thrd self._clean_db() def test_get_common_hashlist_id_by_alg_get(self): """ Test get_common_hashlist_id_by_alg_get() """ self._add_hashlist(have_salts=1, common_by_alg=3) assert self.thrd.get_common_hashlist_id_by_alg(3) == 1 test_data = [ ( 1, {'hash': 'a', 'salt': '1', 'summ': md5('a:1')}, ), ( 0, {'hash': 'a', 'salt': '', 'summ': md5('a')}, ), ] @pytest.mark.parametrize("have_salt,_hash", test_data) def test_get_common_hashlist_id_by_alg_create(self, have_salt, _hash): """ Test get_common_hashlist_id_by_alg_create() :param have_salt: does hashlist has salt? :param _hash: hash data row :return: """ self._add_hashlist(have_salts=have_salt, common_by_alg=0) self._add_hash(hash=_hash['hash'], salt=_hash['salt'], summ=_hash['summ']) assert self.thrd.get_common_hashlist_id_by_alg(3) == 2 test_hashlist_data = {'id': 2, 'name': 'All-MD4', 'have_salts': have_salt, 'delimiter': self.thrd.DELIMITER, 'cracked': 0, 'uncracked': 0, 'errors': '', 'parsed': 0, 'status': 'ready', 'common_by_alg': 3} hashlist_data = self.db.fetch_row("SELECT * FROM hashlists WHERE id = 2") for field in test_hashlist_data: assert hashlist_data[field] == test_hashlist_data[field] def test_get_common_hashlist_id_by_alg_with_salt_create_one_salt_forget(self): """ Test get_common_hashlist_id_by_alg_create() """ self._add_hashlist(have_salts=1, common_by_alg=0) self._add_hash(hash='a', salt='b', summ='333') self._add_hashlist(id=2, have_salts=0, common_by_alg=0) self._add_hash(hashlist_id=2, hash='c', salt='d', summ='111') assert self.thrd.get_common_hashlist_id_by_alg(3) == 3 test_hashlist_data = {'id': 3, 'name': 'All-MD4', 'have_salts': 1, 'delimiter': self.thrd.DELIMITER, 'cracked': 0, 'uncracked': 0, 'errors': '', 'parsed': 0, 'status': 'ready', 'common_by_alg': 3} hashlist_data = self.db.fetch_row("SELECT * FROM hashlists WHERE id = 3") for field in test_hashlist_data: assert hashlist_data[field] == test_hashlist_data[field] def test_get_current_work_hashlist(self): """ Test get_current_work_hashlist() """ assert not self.thrd.get_current_work_hashlist() self.db.insert("task_works", {'hashlist_id': 3, 'status': 'work', 'task_id': 1}) assert self.thrd.get_current_work_hashlist() == 3 def test_get_hashlist_status(self): """ Test get_hashlist_status() """ self._add_hashlist(common_by_alg=1) assert self.thrd.get_hashlist_status(1) == 'ready' def test_is_alg_in_parse(self): """ Test is_alg_in_parse() """ assert self.thrd.is_alg_in_parse(3) is False self._add_hashlist(common_by_alg=1) self.db.insert("task_works", {'hashlist_id': 1, 'status': 'waitoutparse', 'task_id': 
1}) assert self.thrd.is_alg_in_parse(3) is True assert self.thrd.is_alg_in_parse(4) is False self._add_hashlist(id=2, alg_id=4, common_by_alg=1) self.db.insert("task_works", {'hashlist_id': 2, 'status': 'outparsing', 'task_id': 1}) assert self.thrd.is_alg_in_parse(4) is True def test_hashes_count_in_hashlist(self): """ Test hashes_count_in_hashlist() """ assert self.thrd.hashes_count_in_hashlist(1) == 0 self._add_hash() assert self.thrd.hashes_count_in_hashlist(1) == 1 def test_hashes_count_by_algs(self): """ Test hashes_count_by_algs() """ assert self.thrd.hashes_count_by_algs() == {} self._add_hashlist() self._add_hash(summ='111') self._add_hash(summ='222', hash='a', salt='b') self._add_hashlist(id=2, alg_id=4) self._add_hash(hashlist_id=2, summ='333') assert self.thrd.hashes_count_by_algs() == {3: 2, 4: 1} def test_is_alg_have_salts(self): """ Test is_alg_have_salts() """ self._add_hashlist() assert self.thrd.is_alg_have_salts(3) is False self._add_hashlist(id=2, have_salts=1) # Forget salt bug assert self.thrd.is_alg_have_salts(3) is True def test_get_possible_hashlist_and_alg_simple(self): """ Test get_possible_hashlist_and_alg_simple() """ self._add_hashlist() self._add_hash(hash='a', summ='111') self._add_hash(hash='b', summ='222') assert self.thrd.get_possible_hashlist_and_alg() == {'hashlist_id': 2, 'alg_id': 3} def test_get_possible_hashlist_and_alg_none_already(self): """ Test get_possible_hashlist_and_alg_none_already() """ self._add_hashlist() self._add_hash(hash='a', summ='111') self._add_hash(hash='b', summ='222') self._add_hashlist(id=2, common_by_alg=3) self._add_hash(hashlist_id=2, hash='a', summ='111') self._add_hash(hashlist_id=2, hash='b', summ='222') assert self.thrd.get_possible_hashlist_and_alg() is None def test_get_possible_hashlist_and_alg_none_in_parse(self): """ Test get_possible_hashlist_and_alg_none_in_parse() """ self.db.insert("task_works", {'hashlist_id': 1, 'status': 'waitoutparse', 'task_id': 1}) self._add_hashlist() self._add_hash(hash='a', summ='111') self._add_hash(hash='b', summ='222') assert self.thrd.get_possible_hashlist_and_alg() is None self.db.update("task_works", {'status': 'outparsing'}, "id=1") assert self.thrd.get_possible_hashlist_and_alg() is None def test_get_possible_hashlist_and_alg_none_not_ready(self): """ Test get_possible_hashlist_and_alg_none_not_ready() """ self._add_hashlist() self._add_hash(hash='a', summ='111') self._add_hash(hash='b', summ='222') self._add_hashlist(id=2, status='wait', common_by_alg=3) assert self.thrd.get_possible_hashlist_and_alg() is None def test_get_possible_hashlist_and_alg_none_in_work(self): """ Test get_possible_hashlist_and_alg_none_in_work() """ self._add_hashlist() self._add_hash(hash='a', summ='111') self._add_hash(hash='b', summ='222') self._add_hashlist(id=2, common_by_alg=3) self.db.insert("task_works", {'hashlist_id': 2, 'status': 'work', 'task_id': 1}) assert self.thrd.get_possible_hashlist_and_alg() is None def test_clean_old_hashes(self): """ Test clean_old_hashes() """ self._add_hashlist() self._add_hash(hash='a', summ='111') self._add_hash(hash='b', summ='222') assert self.db.fetch_one("SELECT COUNT(*) FROM hashes WHERE hashlist_id = 1") == 2 self.thrd.clean_old_hashes(1) assert self.db.fetch_one("SELECT COUNT(*) FROM hashes WHERE hashlist_id = 1") == 0 assert self.db.fetch_one("SELECT cracked+uncracked FROM hashlists WHERE id = 1") == 0 def test_put_all_hashes_of_alg_in_file(self): """ Test put_all_hashes_of_alg_in_file() """ self._add_hashlist() self._add_hash(hash='a', summ='111') 
self._add_hash(summ='222') self._add_hash(hash='b', summ='333') path = self.thrd.put_all_hashes_of_alg_in_file(3) assert os.path.exists(path) assert file_get_contents(path) == 'a\nb\n' self._add_hashlist(id=2, have_salts=1, alg_id=4) self._add_hash(hashlist_id=2, hash='a', salt='b', summ='111') self._add_hash(hashlist_id=2, summ='222') self._add_hash(hashlist_id=2, hash='c', salt='d', summ='333') path = self.thrd.put_all_hashes_of_alg_in_file(4) assert os.path.exists(path) assert file_get_contents(path) == 'a{0}b\nc{0}d\n'.format(self.thrd.DELIMITER)
2.265625
2
simple_ddl_parser/tokens.py
burakuyar/simple-ddl-parser
46
11524
# statements that are used at the start of a definition, or in statements
# without column definitions (identifier spellings below are kept as-is for
# API compatibility)
defenition_statements = {
    "DROP": "DROP",
    "CREATE": "CREATE",
    "TABLE": "TABLE",
    "DATABASE": "DATABASE",
    "SCHEMA": "SCHEMA",
    "ALTER": "ALTER",
    "TYPE": "TYPE",
    "DOMAIN": "DOMAIN",
    "REPLACE": "REPLACE",
    "OR": "OR",
    "CLUSTERED": "CLUSTERED",
    "SEQUENCE": "SEQUENCE",
    "TABLESPACE": "TABLESPACE",
}
common_statements = {
    "INDEX": "INDEX",
    "REFERENCES": "REFERENCES",
    "KEY": "KEY",
    "ADD": "ADD",
    "AS": "AS",
    "CLONE": "CLONE",
    "DEFERRABLE": "DEFERRABLE",
    "INITIALLY": "INITIALLY",
    "IF": "IF",
    "NOT": "NOT",
    "EXISTS": "EXISTS",
    "ON": "ON",
    "FOR": "FOR",
    "ENCRYPT": "ENCRYPT",
    "SALT": "SALT",
    "NO": "NO",
    "USING": "USING",
    # bigquery
    "OPTIONS": "OPTIONS",
}
columns_defenition = {
    "DELETE": "DELETE",
    "UPDATE": "UPDATE",
    "NULL": "NULL",
    "ARRAY": "ARRAY",
    ",": "COMMA",
    "DEFAULT": "DEFAULT",
    "COLLATE": "COLLATE",
    "ENFORCED": "ENFORCED",
    "ENCODE": "ENCODE",
    "GENERATED": "GENERATED",
    "COMMENT": "COMMENT",
}
first_liners = {
    "LIKE": "LIKE",
    "CONSTRAINT": "CONSTRAINT",
    "FOREIGN": "FOREIGN",
    "PRIMARY": "PRIMARY",
    "UNIQUE": "UNIQUE",
    "CHECK": "CHECK",
    "WITH": "WITH",
}
common_statements.update(first_liners)
defenition_statements.update(common_statements)
after_columns_tokens = {
    "PARTITIONED": "PARTITIONED",
    "PARTITION": "PARTITION",
    "BY": "BY",
    # hql
    "INTO": "INTO",
    "STORED": "STORED",
    "LOCATION": "LOCATION",
    "ROW": "ROW",
    "FORMAT": "FORMAT",
    "TERMINATED": "TERMINATED",
    "COLLECTION": "COLLECTION",
    "ITEMS": "ITEMS",
    "MAP": "MAP",
    "KEYS": "KEYS",
    "SERDE": "SERDE",
    "CLUSTER": "CLUSTER",
    "SERDEPROPERTIES": "SERDEPROPERTIES",
    "TBLPROPERTIES": "TBLPROPERTIES",
    "SKEWED": "SKEWED",
    # oracle
    "STORAGE": "STORAGE",
    "TABLESPACE": "TABLESPACE",
    # mssql
    "TEXTIMAGE_ON": "TEXTIMAGE_ON",
}
sequence_reserved = {
    "INCREMENT": "INCREMENT",
    "START": "START",
    "MINVALUE": "MINVALUE",
    "MAXVALUE": "MAXVALUE",
    "CACHE": "CACHE",
    "NO": "NO",
}

tokens = tuple(
    set(
        ["ID", "DOT", "STRING", "DQ_STRING", "LP", "RP", "LT", "RT", "COMMAT"]
        + list(defenition_statements.values())
        + list(common_statements.values())
        + list(columns_defenition.values())
        + list(sequence_reserved.values())
        + list(after_columns_tokens.values())
    )
)

symbol_tokens = {
    ")": "RP",
    "(": "LP",
}

symbol_tokens_no_check = {"<": "LT", ">": "RT"}
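These maps translate raw words into token names for the parser. A hypothetical classification helper shows the intended lookup order; this is illustrative only, the real simple-ddl-parser lexer is more involved:

# Hypothetical helper (my sketch, not the library's actual lexer):
def classify(word, in_columns=False):
    """Map a raw word to a token name, falling back to a generic ID."""
    table = columns_defenition if in_columns else defenition_statements
    return table.get(word.upper(), symbol_tokens.get(word, "ID"))

print(classify("create"))    # CREATE
print(classify("my_table"))  # ID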
2.21875
2
rmexp/worker.py
junjuew/scalable-edge-native-applications
1
11525
from __future__ import absolute_import, division, print_function import json import logging import os import time import importlib import multiprocessing import cv2 import fire import logzero from logzero import logger import numpy as np from rmexp import config, cvutils, dbutils, gabriel_pb2, client from rmexp.schema import models logzero.formatter(logging.Formatter( fmt='%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s', datefmt='%H:%M:%S')) logzero.loglevel(logging.DEBUG) def work_loop(job_queue, app, busy_wait=None): """[summary] Arguments: job_queue {[type]} -- [description] app {[type]} -- [description] Keyword Arguments: busy_wait {float} -- if not None, busy spin seconds instead of running actual app (default: {None}) """ handler = importlib.import_module(app).Handler() while True: get_ts = time.time() msg = job_queue.get()[0] get_wait = time.time() - get_ts if get_wait > 2e-3: logger.warn("[pid {}] took {} ms to get a new request. Maybe waiting".format( os.getpid(), int(1000 * get_wait))) arrival_ts = time.time() gabriel_msg = gabriel_pb2.Message() gabriel_msg.ParseFromString(msg) encoded_im, ts = gabriel_msg.data, gabriel_msg.timestamp logger.debug("[pid {}] about to process frame {}".format( os.getpid(), gabriel_msg.index)) cts = time.clock() if not busy_wait: # do real work encoded_im_np = np.frombuffer(encoded_im, dtype=np.uint8) img = cv2.imdecode(encoded_im_np, cv2.CV_LOAD_IMAGE_UNCHANGED) result = handler.process(img) else: # busy wait fixed time tic = time.time() while True: if time.time() - tic > busy_wait: break result = 'busy wait {}'.format(busy_wait) finished_ts = time.time() time_lapse = (finished_ts - ts) * 1000 cpu_proc_ms = round((time.clock() - cts) * 1000) if gabriel_msg.reply: reply = gabriel_pb2.Message() reply.data = str(result) reply.timestamp = gabriel_msg.timestamp reply.index = gabriel_msg.index reply.finished_ts = finished_ts reply.arrival_ts = arrival_ts reply.cpu_proc_ms = cpu_proc_ms job_queue.put([reply.SerializeToString(), ]) logger.debug('[pid {}] takes {} ms (cpu: {} ms) for frame {}: {}.'.format( os.getpid(), (time.time() - ts) * 1000, cpu_proc_ms, gabriel_msg.index, result)) class Sampler(object): """A Class to sample video stream. Designed to work with cam.read(). Sample once every sample_period calls """ def __init__(self, sample_period, sample_func=None): super(Sampler, self).__init__() self._sp = sample_period assert(type(sample_period) is int and sample_period > 0) self._sf = sample_func self._cnt = 0 def sample(self): while True: self._cnt = (self._cnt + 1) % self._sp if self._cnt == 0: return self._sf() self._sf() def process_and_time(img, app_handler): ts = time.time() result = app_handler.process(img) time_lapse = int(round((time.time() - ts) * 1000)) return result, time_lapse def store( data, session, store_result, store_latency, store_profile, **kwargs): name, trace, idx, result, time_lapse = data if store_result: rec, _ = dbutils.get_or_create( session, models.SS, name=name, index=idx, trace=trace) rec.val = str(result) if store_latency: rec, _ = dbutils.get_or_create( session, models.LegoLatency, name=name, index=idx) rec.val = int(time_lapse) if store_profile: rec = kwargs rec.update( {'trace': trace, 'index': idx, 'name': name, 'latency': time_lapse } ) dbutils.insert( session, models.ResourceLatency, rec ) def batch_process(video_uri, app, experiment_name, trace=None, store_result=False, store_latency=False, store_profile=False, **kwargs): """Batch process a video. 
Able to store both the result and the frame processing latency. Arguments: video_uri {string} -- Video URI app {string} -- Applicaiton name experiment_name {string} -- Experiment name Keyword Arguments: trace {string} -- Trace id store_result {bool} -- Whether to store result into database store_result {bool} -- [description] (default: {False}) store_latency {bool} -- [description] (default: {False}) cpu {string} -- No of CPUs used. Used to populate profile database memory {string} -- No of memory used. Used to populate profile database num_worker {int} -- No of simultaneous workers. Used to populate profile database """ if trace is None: trace = os.path.basename(os.path.dirname(video_uri)) app = importlib.import_module(app) app_handler = app.Handler() vc = client.VideoClient( app.__name__, video_uri, None, loop=False, random_start=False) idx = 1 with dbutils.session_scope() as session: for img in vc.get_frame_generator(): cpu_time_ts = time.clock() result, time_lapse = process_and_time(img, app_handler) logger.debug("[pid: {}] processing frame {} from {}. {} ms".format(os.getpid(), idx, video_uri, int(time_lapse))) logger.debug(result) store( (experiment_name, trace, idx, result, time_lapse), session, store_result, store_latency, store_profile, **kwargs ) idx += 1 def phash(video_uri): cam = cv2.VideoCapture(video_uri) has_frame = True with dbutils.session_scope(dry_run=False) as sess: trace_name = os.path.basename(os.path.dirname(video_uri)) idx = 1 while has_frame: has_frame, img = cam.read() if img is not None: cur_hash = cvutils.phash(img) sess.add(models.SS( name='{}-f{}-phash'.format(trace_name, idx), val=str(cur_hash), trace=trace_name)) idx += 1 def phash_diff_adjacent_frame(video_uri, output_dir): cam = cv2.VideoCapture(video_uri) os.makedirs(output_dir) has_frame = True prev_hash = None idx = 1 logger.debug('calculating phash diff for adjacent frames') while has_frame: has_frame, img = cam.read() if img is not None: cur_hash = cvutils.phash(img) if prev_hash is not None: diff = cur_hash - prev_hash cv2.putText(img, 'diff={}'.format( diff), (int(img.shape[1] / 3), img.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), thickness=5) cv2.imwrite(os.path.join( output_dir, '{:010d}.jpg'.format(idx)), img) logger.debug(diff) prev_hash = cur_hash idx += 1 if __name__ == "__main__": fire.Fire()
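A small usage sketch for the Sampler class above; the file name is an assumption and the example is mine, not part of the original module:

# Illustrative use of Sampler (assumed file name): read every frame to keep
# the capture in sync, but only hand back every 5th frame for processing.
cam = cv2.VideoCapture('trace.mp4')
sampler = Sampler(5, sample_func=cam.read)
ok, frame = sampler.sample()  # internally calls cam.read() 5 times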
2.1875
2
app/requests/users.py
codingedward/book-a-meal-api
0
11526
from .base import JsonRequest


class PostRequest(JsonRequest):
    @staticmethod
    def rules():
        return {
            'email': 'required|email|unique:User,email',
            'password': '<PASSWORD>',
            'username': 'required|alpha|least_string:3',
            'role': 'integer|positive|found_in:1,2',
        }


class PutRequest(JsonRequest):
    @staticmethod
    def rules():
        return {
            'password': '<PASSWORD>',
            'username': 'alpha|least_string:3',
            'role': 'integer|positive|found_in:1,2',
        }
2.578125
3
kmp.py
mutux/kmp
3
11527
<gh_stars>1-10
def kmp(P, T):
    # Compute, for each prefix of P, the length of the longest proper suffix
    # that also matches a prefix, and store the values in list K.
    K = []  # K[t] is how many characters P should shift back to when a mismatch happens at t
    t = -1  # K has length len(P) + 1; the first element is -1, corresponding to no elements in P
    K.append(t)  # add the first element, keeping t = -1
    for k in range(1, len(P) + 1):  # traverse all elements in P, computing the value for each
        while t >= 0 and P[t] != P[k - 1]:  # while the current suffix doesn't match, try a shorter one
            t = K[t]
        t = t + 1  # on a match, the matching position moves one character ahead
        K.append(t)  # record the matching position for k
    print(K)

    # Match the string T against P
    m = 0  # current matching position in P when compared with T
    for i in range(0, len(T)):  # traverse T one character at a time
        while m >= 0 and P[m] != T[i]:  # on a mismatch at position m, fall back to position K[m] and retry
            m = K[m]
        m = m + 1  # position m matched, so advance P to the next position
        if m == len(P):  # m reached the end of P: a full match; continue by falling back K[m] positions
            print(i - m + 1, i)
            m = K[m]


if __name__ == "__main__":
    kmp('abcbabca', 'abcbabcabcbabcbabcbabcabcbabcbabca')
    kmp('abab', 'ababcabababc')
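A worked example of the failure table, added for illustration, tracing the construction of K for P = 'abab':

# k=1: 'a' has no proper prefix/suffix overlap       -> K[1] = 0
# k=2: 'ab' has none either                          -> K[2] = 0
# k=3: 'aba' ends with 'a', which is also a prefix   -> K[3] = 1
# k=4: 'abab' ends with 'ab', also a prefix          -> K[4] = 2
# With the sentinel K[0] = -1: K = [-1, 0, 0, 1, 2]
kmp('abab', 'ababcabababc')  # prints K, then matches (0, 3), (5, 8), (7, 10)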
3.265625
3
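A quick sanity check on the failure table the code above builds, using the same convention (len(P) + 1 entries, K[0] = -1). For P = 'abab' the longest proper prefix/suffix lengths are 0, 0, 1, 2:

def failure_table(P):
    # Same construction as in the record above, returned instead of printed.
    K, t = [-1], -1
    for k in range(1, len(P) + 1):
        while t >= 0 and P[t] != P[k - 1]:
            t = K[t]
        t += 1
        K.append(t)
    return K

assert failure_table('abab') == [-1, 0, 0, 1, 2]
assert failure_table('abcbabca') == [-1, 0, 0, 0, 0, 1, 2, 3, 1]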
weeklypedia/publish.py
Nintendofan885/weeklypedia
0
11528
<reponame>Nintendofan885/weeklypedia<filename>weeklypedia/publish.py
# -*- coding: utf-8 -*-

import os
import json

from os.path import dirname
from argparse import ArgumentParser

from clastic.render import AshesRenderFactory

from common import DEBUG, DEBUG_LIST_ID, SENDKEY
from web import (comma_int, ISSUE_TEMPLATES_PATH)
from bake import (Issue, bake_latest_issue, render_index, SUPPORTED_LANGS)

_CUR_PATH = dirname(os.path.abspath(__file__))
LIST_ID_MAP = json.load(open(os.path.join(_CUR_PATH, 'secrets.json'))).get('list_ids')


def send_issue(lang, is_dev=False):
    if is_dev:
        list_id = DEBUG_LIST_ID
    else:
        list_id = LIST_ID_MAP[lang]
    cur_issue = Issue(lang, include_dev=is_dev)
    return cur_issue.send(list_id, SENDKEY)


def get_argparser():
    desc = 'Bake and send Weeklypedia issues. (Please fetch first)'
    prs = ArgumentParser(description=desc)
    prs.add_argument('--lang', default=None)
    prs.add_argument('--bake_all', default=False, action='store_true')
    prs.add_argument('--debug', default=DEBUG, action='store_true')
    return prs


if __name__ == '__main__':
    issue_ashes_env = AshesRenderFactory(ISSUE_TEMPLATES_PATH,
                                         filters={'ci': comma_int}).env
    parser = get_argparser()
    args = parser.parse_args()
    debug = args.debug
    if args.bake_all:
        for lang in SUPPORTED_LANGS:
            bake_latest_issue(issue_ashes_env, lang=lang, include_dev=debug)
    if args.lang in SUPPORTED_LANGS:
        lang = args.lang
        print bake_latest_issue(issue_ashes_env, lang=lang, include_dev=debug)
        print send_issue(lang, debug)
2.25
2
scripts/words_gen.py
andybui01/Bloom
0
11529
import random
import sys

# usage: python3 words_gen.py N > list.txt

N = int(sys.argv[1])  # how many words should be in the resulting list

with open("scripts/words.txt", "r") as f:
    words = f.readlines()

for i in range(N):
    print(words[random.randint(0, 466550 - 1)].rstrip())
3.328125
3
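The script above hardcodes 466550 as the word count, which silently breaks if words.txt changes. A variant that derives the count from the file itself, assuming the same one-word-per-line layout, and optionally samples without repeats:

import random
import sys

def sample_words(path, n, distinct=False):
    with open(path, "r") as f:
        words = [line.rstrip() for line in f]
    if distinct:
        return random.sample(words, n)  # no repeats; requires n <= len(words)
    return [random.choice(words) for _ in range(n)]

if __name__ == "__main__":
    for word in sample_words("scripts/words.txt", int(sys.argv[1])):
        print(word)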
testing/test_input.py
arunumd/Rover
1
11530
import unittest

from modules.Input import *


class CommandInputTest(unittest.TestCase):
    def setUp(self):
        self.field_a_locations = ["gOtOFieldAroW8", " go to fieLDA RoW 18 ", "gOTOfield A rOW88"]
        self.field_b_locations = ["gOtOFieldBroW8", " go to fieLDB RoW 18 ", "gOTOfield B rOW88"]
        self.charger_locations = ["gotocharGeR", " goTo cHaRgeR ", " go toCHARGER "]
        self.planting_locations = ["plantPotaToes inFieLDA rOW7", " plANt caRRottsin fieldBRow 88"]

    def tearDown(self):
        self.field_a_locations = []
        self.field_b_locations = []
        self.charger_locations = []
        self.planting_locations = []

    def test_field_a_rows(self):
        self.assertEqual(user_input(self.field_a_locations[0])[0], "FAR8")
        self.assertEqual(user_input(self.field_a_locations[0])[1], "N/A")
        self.assertEqual(user_input(self.field_a_locations[1])[0], "FAR18")
        self.assertEqual(user_input(self.field_a_locations[1])[1], "N/A")
        self.assertEqual(user_input(self.field_a_locations[2])[0], "FAR88")
        self.assertEqual(user_input(self.field_a_locations[2])[1], "N/A")

    def test_field_b_rows(self):
        self.assertEqual(user_input(self.field_b_locations[0])[0], "FBR8")
        self.assertEqual(user_input(self.field_b_locations[0])[1], "N/A")
        self.assertEqual(user_input(self.field_b_locations[1])[0], "FBR18")
        self.assertEqual(user_input(self.field_b_locations[1])[1], "N/A")
        self.assertEqual(user_input(self.field_b_locations[2])[0], "FBR88")
        self.assertEqual(user_input(self.field_b_locations[2])[1], "N/A")

    def test_charger(self):
        self.assertEqual(user_input(self.charger_locations[0])[0], "Charger")
        self.assertEqual(user_input(self.charger_locations[0])[1], "N/A")
        self.assertEqual(user_input(self.charger_locations[1])[0], "Charger")
        self.assertEqual(user_input(self.charger_locations[1])[1], "N/A")
        self.assertEqual(user_input(self.charger_locations[2])[0], "Charger")
        self.assertEqual(user_input(self.charger_locations[2])[1], "N/A")

    def test_bad_input(self):
        self.assertEqual(user_input("")[0], "Invalid task")
        self.assertEqual(user_input("")[1], "N/A")
        self.assertEqual(user_input("fg73f37g")[0], "Invalid task")
        self.assertEqual(user_input("fg73f37g")[1], "N/A")

    def test_planting_instructions(self):
        self.assertEqual(user_input(self.planting_locations[0])[0], "FAR7")
        self.assertEqual(user_input(self.planting_locations[0])[1], "PotaToes")
        self.assertEqual(user_input(self.planting_locations[1])[0], "FBR88")
        self.assertEqual(user_input(self.planting_locations[1])[1], "caRRotts")


if __name__ == '__main__':
    unittest.main()
3.640625
4
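The modules.Input implementation under test is not included in this record. Judging only by the assertions above, user_input must ignore case and whitespace and return a (location, crop) pair. A hypothetical reconstruction that passes these tests:

import re

def user_input(command):
    # Strip all whitespace, then match the command forms case-insensitively.
    squashed = re.sub(r'\s+', '', command)
    m = re.match(r'(?i)plant([a-z]+)infield([ab])row(\d+)', squashed)
    if m:
        return 'F{}R{}'.format(m.group(2).upper(), m.group(3)), m.group(1)
    m = re.match(r'(?i)gotofield([ab])row(\d+)', squashed)
    if m:
        return 'F{}R{}'.format(m.group(1).upper(), m.group(2)), 'N/A'
    if re.match(r'(?i)gotocharger$', squashed):
        return 'Charger', 'N/A'
    return 'Invalid task', 'N/A'

print(user_input(' go to fieLDA RoW 18 '))        # ('FAR18', 'N/A')
print(user_input('plantPotaToes inFieLDA rOW7'))  # ('FAR7', 'PotaToes')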
rmgpy/kinetics/chebyshevTest.py
pm15ma/RMG-Py
1
11531
<gh_stars>1-10 #!/usr/bin/env python3 ############################################################################### # # # RMG - Reaction Mechanism Generator # # # # Copyright (c) 2002-2020 Prof. <NAME> (<EMAIL>), # # Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) # # # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the 'Software'), # # to deal in the Software without restriction, including without limitation # # the rights to use, copy, modify, merge, publish, distribute, sublicense, # # and/or sell copies of the Software, and to permit persons to whom the # # Software is furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in # # all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # # DEALINGS IN THE SOFTWARE. # # # ############################################################################### """ This script contains unit tests of the :mod:`rmgpy.kinetics.chebyshev` module. """ import unittest import numpy as np from rmgpy.exceptions import KineticsError from rmgpy.kinetics.chebyshev import Chebyshev ################################################################################ class TestChebyshev(unittest.TestCase): """ Contains unit tests of the Chebyshev class. """ def setUp(self): """ A function run before each unit test in this class. """ self.Tmin = 300. self.Tmax = 2000. self.Pmin = 0.01 self.Pmax = 100. self.coeffs = np.array([ [11.67723, 0.729281, -0.11984, 0.00882175], [-1.02669, 0.853639, -0.0323485, -0.027367], [-0.447011, 0.244144, 0.0559122, -0.0101723], [-0.128261, 0.0111596, 0.0281176, 0.00604353], [-0.0117034, -0.0235646, 0.00061009, 0.00401309], [0.0155433, -0.0136846, -0.00463048, -0.000261353], ]) self.comment = """acetyl + O2 -> acetylperoxy""" self.chebyshev = Chebyshev( coeffs=self.coeffs, kunits="cm^3/(mol*s)", Tmin=(self.Tmin, "K"), Tmax=(self.Tmax, "K"), Pmin=(self.Pmin, "bar"), Pmax=(self.Pmax, "bar"), comment=self.comment, ) def test_coeffs(self): """ Test that the Chebyshev coeffs property was properly set. """ self.assertEqual(self.chebyshev.coeffs.value.shape, self.coeffs.shape) for i in range(self.chebyshev.coeffs.value.shape[0]): for j in range(self.chebyshev.coeffs.value.shape[1]): C0 = float(self.coeffs[i, j]) C = float(self.chebyshev.coeffs.value_si[i, j]) if i == 0 and j == 0: C0 -= 6 # Unit conversion from cm^3/(mol*s) to m^3/(mol*s) self.assertAlmostEqual(C0, C, delta=1e-6 * C0) def test_temperature_min(self): """ Test that the Chebyshev Tmin property was properly set. """ self.assertAlmostEqual(self.chebyshev.Tmin.value_si, self.Tmin, 6) def test_temperature_max(self): """ Test that the Chebyshev Tmax property was properly set. """ self.assertAlmostEqual(self.chebyshev.Tmax.value_si, self.Tmax, 6) def test_pressure_min(self): """ Test that the Chebyshev Pmin property was properly set. 
""" self.assertAlmostEqual(self.chebyshev.Pmin.value_si * 1e-5, self.Pmin, 6) def test_pressure_max(self): """ Test that the Chebyshev Pmax property was properly set. """ self.assertAlmostEqual(self.chebyshev.Pmax.value_si * 1e-5, self.Pmax, 6) def test_comment(self): """ Test that the Chebyshev comment property was properly set. """ self.assertEqual(self.chebyshev.comment, self.comment) def test_is_pressure_dependent(self): """ Test the Chebyshev.is_pressure_dependent() method. """ self.assertTrue(self.chebyshev.is_pressure_dependent()) def test_get_rate_coefficient(self): """ Test the Chebyshev.get_rate_coefficient() method. """ Tlist = np.array([300, 500, 1000, 1500]) Plist = np.array([1e4, 1e5, 1e6]) Kexp = np.array([ [2.29100e+06, 2.58452e+06, 2.57204e+06], [1.10198e+06, 2.04037e+06, 2.57428e+06], [4.37919e+04, 2.36481e+05, 8.57727e+05], [5.20144e+03, 4.10123e+04, 2.50401e+05], ]) for t in range(Tlist.shape[0]): for p in range(Plist.shape[0]): Kact = self.chebyshev.get_rate_coefficient(Tlist[t], Plist[p]) self.assertAlmostEqual(Kact / Kexp[t, p], 1.0, 4, '{0} != {1} within 4 places'.format(Kexp[t, p], Kact)) def test_fit_to_data(self): """ Test the Chebyshev.fit_to_data() method. """ Tdata = np.array( [300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000]) Pdata = np.array([3e3, 1e4, 3e4, 1e5, 3e5, 1e6, 3e7]) nT = len(Tdata) nP = len(Pdata) kdata = np.zeros((nT, nP)) for t in range(nT): for p in range(nP): kdata[t, p] = self.chebyshev.get_rate_coefficient(Tdata[t], Pdata[p]) * 1e6 chebyshev = Chebyshev().fit_to_data(Tdata, Pdata, kdata, kunits="cm^3/(mol*s)", degreeT=6, degreeP=4, Tmin=300, Tmax=2000, Pmin=0.1, Pmax=10.) for t in range(nT): for p in range(nP): kfit = chebyshev.get_rate_coefficient(Tdata[t], Pdata[p]) * 1e6 self.assertAlmostEqual(kfit, kdata[t, p], delta=1e-4 * kdata[t, p]) def test_fit_to_data2(self): """ Test the Chebyshev.fit_to_data() method throws error without enough degrees of freedom. Here only 3 temperatures are given, but the polynomial desired has 6 parameters. """ Tdata = np.array([300, 1200, 2000]) Pdata = np.array([1e5, 3e5, 1e6, 3e7]) nT = len(Tdata) nP = len(Pdata) kdata = np.zeros((nT, nP)) for t in range(nT): for p in range(nP): kdata[t, p] = self.chebyshev.get_rate_coefficient(Tdata[t], Pdata[p]) with self.assertRaises(KineticsError): Chebyshev().fit_to_data(Tdata, Pdata, kdata, kunits="cm^3/(mol*s)", degreeT=12, degreeP=8, Tmin=300, Tmax=2000, Pmin=0.1, Pmax=10.) def test_pickle(self): """ Test that a Chebyshev object can be pickled and unpickled with no loss of information. 
""" import pickle chebyshev = pickle.loads(pickle.dumps(self.chebyshev, -1)) self.assertEqual(self.chebyshev.coeffs.value.shape[0], chebyshev.coeffs.value.shape[0]) self.assertEqual(self.chebyshev.coeffs.value.shape[1], chebyshev.coeffs.value.shape[1]) for i in range(self.chebyshev.coeffs.value.shape[0]): for j in range(self.chebyshev.coeffs.value.shape[1]): C0 = self.chebyshev.coeffs.value_si[i, j] C = chebyshev.coeffs.value_si[i, j] self.assertAlmostEqual(C0, C, delta=1e-4 * C0) self.assertAlmostEqual(self.chebyshev.Tmin.value, chebyshev.Tmin.value, 4) self.assertEqual(self.chebyshev.Tmin.units, chebyshev.Tmin.units) self.assertAlmostEqual(self.chebyshev.Tmax.value, chebyshev.Tmax.value, 4) self.assertEqual(self.chebyshev.Tmax.units, chebyshev.Tmax.units) self.assertAlmostEqual(self.chebyshev.Pmin.value, chebyshev.Pmin.value, 4) self.assertEqual(self.chebyshev.Pmin.units, chebyshev.Pmin.units) self.assertAlmostEqual(self.chebyshev.Pmax.value, chebyshev.Pmax.value, 4) self.assertEqual(self.chebyshev.Pmax.units, chebyshev.Pmax.units) self.assertEqual(self.chebyshev.comment, chebyshev.comment) def test_repr(self): """ Test that a Chebyshev object can be reconstructed from its repr() output with no loss of information. """ namespace = {} exec('chebyshev = {0!r}'.format(self.chebyshev), globals(), namespace) self.assertIn('chebyshev', namespace) chebyshev = namespace['chebyshev'] self.assertEqual(self.chebyshev.coeffs.value.shape[0], chebyshev.coeffs.value.shape[0]) self.assertEqual(self.chebyshev.coeffs.value.shape[1], chebyshev.coeffs.value.shape[1]) for i in range(self.chebyshev.coeffs.value.shape[0]): for j in range(self.chebyshev.coeffs.value.shape[1]): C0 = self.chebyshev.coeffs.value[i, j] C = chebyshev.coeffs.value[i, j] self.assertAlmostEqual(C0, C, delta=1e-4 * C0) self.assertAlmostEqual(self.chebyshev.Tmin.value, chebyshev.Tmin.value, 4) self.assertEqual(self.chebyshev.Tmin.units, chebyshev.Tmin.units) self.assertAlmostEqual(self.chebyshev.Tmax.value, chebyshev.Tmax.value, 4) self.assertEqual(self.chebyshev.Tmax.units, chebyshev.Tmax.units) self.assertAlmostEqual(self.chebyshev.Pmin.value, chebyshev.Pmin.value, 4) self.assertEqual(self.chebyshev.Pmin.units, chebyshev.Pmin.units) self.assertAlmostEqual(self.chebyshev.Pmax.value, chebyshev.Pmax.value, 4) self.assertEqual(self.chebyshev.Pmax.units, chebyshev.Pmax.units) self.assertEqual(self.chebyshev.comment, chebyshev.comment) def test_change_rate(self): """ Test the Chebyshev.change_rate() method. """ Tlist = np.array([300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500]) k0list = np.array([self.chebyshev.get_rate_coefficient(T, 1e5) for T in Tlist]) self.chebyshev.change_rate(2) for T, kexp in zip(Tlist, k0list): kact = self.chebyshev.get_rate_coefficient(T, 1e5) self.assertAlmostEqual(2 * kexp, kact, delta=1e-6 * kexp) def test_is_identical_to(self): """ Test the Chebyshev.is_identical_to() method. 
""" # Trivial case, compare to a KineticsModel from rmgpy.kinetics.model import KineticsModel self.assertFalse(self.chebyshev.is_identical_to(KineticsModel())) # Compare to identical Chebyshev new_chebyshev = Chebyshev( coeffs=self.coeffs, kunits="cm^3/(mol*s)", Tmin=(self.Tmin, "K"), Tmax=(self.Tmax, "K"), Pmin=(self.Pmin, "bar"), Pmax=(self.Pmax, "bar"), comment=self.comment, ) self.assertTrue(self.chebyshev.is_identical_to(new_chebyshev)) # Compare to Chebyshev with different Tmin/Tmax new_chebyshev = Chebyshev( coeffs=self.coeffs, kunits="cm^3/(mol*s)", Tmin=(200, "K"), Tmax=(self.Tmax, "K"), Pmin=(self.Pmin, "bar"), Pmax=(self.Pmax, "bar"), comment=self.comment, ) self.assertFalse(self.chebyshev.is_identical_to(new_chebyshev)) new_chebyshev = Chebyshev( coeffs=self.coeffs, kunits="cm^3/(mol*s)", Tmin=(self.Tmin, "K"), Tmax=(2500, "K"), Pmin=(self.Pmin, "bar"), Pmax=(self.Pmax, "bar"), comment=self.comment, ) self.assertFalse(self.chebyshev.is_identical_to(new_chebyshev)) # Compare to Chebyshev with different degreeT/degreeP new_chebyshev = Chebyshev( coeffs=self.coeffs[0:-1, :], # Remove one T dimension kunits="cm^3/(mol*s)", Tmin=(self.Tmin, "K"), Tmax=(self.Tmax, "K"), Pmin=(self.Pmin, "bar"), Pmax=(self.Pmax, "bar"), comment=self.comment, ) self.assertFalse(self.chebyshev.is_identical_to(new_chebyshev)) new_chebyshev = Chebyshev( coeffs=self.coeffs[:, 0:-1], # Remove one P dimension kunits="cm^3/(mol*s)", Tmin=(self.Tmin, "K"), Tmax=(self.Tmax, "K"), Pmin=(self.Pmin, "bar"), Pmax=(self.Pmax, "bar"), comment=self.comment, ) self.assertFalse(self.chebyshev.is_identical_to(new_chebyshev)) # Compare to Chebyshev with different units new_chebyshev = Chebyshev( coeffs=self.coeffs, kunits="m^3/(mol*s)", Tmin=(self.Tmin, "K"), Tmax=(self.Tmax, "K"), Pmin=(self.Pmin, "bar"), Pmax=(self.Pmax, "bar"), comment=self.comment, ) self.assertFalse(self.chebyshev.is_identical_to(new_chebyshev)) # Compare to Chebyshev with slightly different coefficients new_chebyshev = Chebyshev( coeffs=np.copy(self.coeffs) * 0.01, kunits="cm^3/(mol*s)", Tmin=(self.Tmin, "K"), Tmax=(self.Tmax, "K"), Pmin=(self.Pmin, "bar"), Pmax=(self.Pmax, "bar"), comment=self.comment, ) self.assertFalse(self.chebyshev.is_identical_to(new_chebyshev)) ################################################################################ if __name__ == '__main__': unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
1.570313
2
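The coefficient matrix in the test record above encodes a Chebyshev pressure-dependent rate. A standalone sketch of the standard (CHEMKIN-style) convention it follows, where the inverse temperature and log-pressure are mapped onto [-1, 1]; this mirrors the convention only, not RMG-Py's exact implementation in rmgpy.kinetics.chebyshev:

import numpy as np

def chebyshev_rate(T, P, coeffs, Tmin, Tmax, Pmin, Pmax):
    # log10 k(T, P) = sum_ij a_ij * phi_i(Ttilde) * phi_j(Ptilde)
    Tt = (2.0 / T - 1.0 / Tmin - 1.0 / Tmax) / (1.0 / Tmax - 1.0 / Tmin)
    Pt = ((2.0 * np.log10(P) - np.log10(Pmin) - np.log10(Pmax))
          / (np.log10(Pmax) - np.log10(Pmin)))
    nT, nP = coeffs.shape
    # Chebyshev polynomials of the first kind via cos(n * arccos(x))
    phiT = np.cos(np.arange(nT) * np.arccos(Tt))
    phiP = np.cos(np.arange(nP) * np.arccos(Pt))
    return 10.0 ** (phiT @ coeffs @ phiP)

coeffs = np.array([[10.0, 0.5], [-1.0, 0.1]])  # toy 2x2 coefficient matrix
print(chebyshev_rate(1000.0, 1e5, coeffs, 300.0, 2000.0, 1e3, 1e7))  # Pmin/Pmax in Pa here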
SceneDistribution_Blender/Source/__init__.py
FilmakademieRnd/v-p-e-t
62
11532
""" ----------------------------------------------------------------------------- This source file is part of VPET - Virtual Production Editing Tools http://vpet.research.animationsinstitut.de/ http://github.com/FilmakademieRnd/VPET Copyright (c) 2021 <NAME>, Animationsinstitut R&D Lab This project has been initiated in the scope of the EU funded project Dreamspace under grant agreement no 610005 in the years 2014, 2015 and 2016. http://dreamspaceproject.eu/ Post Dreamspace the project has been further developed on behalf of the research and development activities of Animationsinstitut. The VPET component Blender Scene Distribution is intended for research and development purposes only. Commercial use of any kind is not permitted. There is no support by Filmakademie. Since the Blender Scene Distribution is available for free, Filmakademie shall only be liable for intent and gross negligence; warranty is limited to malice. Scene DistributiorUSD may under no circumstances be used for racist, sexual or any illegal purposes. In all non-commercial productions, scientific publications, prototypical non-commercial software tools, etc. using the Blender Scene Distribution Filmakademie has to be named as follows: “VPET-Virtual Production Editing Tool by <NAME>, Animationsinstitut (http://research.animationsinstitut.de)“. In case a company or individual would like to use the Blender Scene Distribution in a commercial surrounding or for commercial purposes, software based on these components or any part thereof, the company/individual will have to contact Filmakademie (research<at>filmakademie.de). ----------------------------------------------------------------------------- """ bl_info = { "name" : "VPET Blender", "author" : "<NAME>", "description" : "", "blender" : (2, 92, 2), "version" : (0, 5, 0), "location" : "VIEW3D", "warning" : "", "category" : "Animationsinstitut" } from typing import Set import bpy from .bl_op import DoDistribute from .bl_op import StopDistribute from .bl_op import SetupScene from .bl_op import InstallZMQ from .bl_panel import VPET_PT_Panel from .tools import initialize from .settings import VpetData from .settings import VpetProperties # imported classes to register classes = (DoDistribute, StopDistribute, SetupScene, VPET_PT_Panel, VpetProperties, InstallZMQ) ## Register classes and VpetSettings # def register(): bpy.types.WindowManager.vpet_data = VpetData() from bpy.utils import register_class for cls in classes: try: register_class(cls) print(f"Registering {cls.__name__}") except Exception as e: print(f"{cls.__name__} "+ str(e)) bpy.types.Scene.vpet_properties = bpy.props.PointerProperty(type=VpetProperties) initialize() print("Registered VPET Addon") ## Unregister for removal of Addon # def unregister(): del bpy.types.WindowManager.vpet_data from bpy.utils import unregister_class for cls in classes: try: unregister_class(cls) except Exception as e: print(f"{cls.__name__} "+ str(e)) print("Unregistered VPET Addon")
1.15625
1
defences/CIFAR10/standard_training.py
calinbiberea/imperial-individual-project
0
11533
# Unlike the other datasets, CIFAR-10 uses ResNet and suffers from # a variety of problems, including exploding gradients import torch import torch.nn as nn from tqdm.notebook import tnrange, tqdm # For loading model sanely import os.path import sys # This here actually adds the path sys.path.append("../../") import models.resnet as resnet # Define the `device` PyTorch will be running on, please hope it is CUDA device = "cuda" if torch.cuda.is_available() else "cpu" print("Notebook will use PyTorch Device: " + device.upper()) # Helps adjust learning rate for better results def adjust_learning_rate(optimizer, epoch, learning_rate, long_training): actual_learning_rate = learning_rate if long_training: first_update_threshold = 100 second_update_threshold = 150 else: first_update_threshold = 20 second_update_threshold = 25 if epoch >= first_update_threshold: actual_learning_rate = 0.01 if epoch >= second_update_threshold: actual_learning_rate = 0.001 for param_group in optimizer.param_groups: param_group["lr"] = actual_learning_rate # This method creates a new model and also trains it def standard_training( trainSetLoader, long_training=True, load_if_available=False, load_path="../models_data/CIFAR10/cifar10_standard" ): # Number of epochs is decided by training length if long_training: epochs = 200 else: epochs = 30 learning_rate = 0.1 # Network parameters loss_function = nn.CrossEntropyLoss() model = resnet.ResNet18() model = model.to(device) model = nn.DataParallel(model) model.train() # Consider using ADAM here as another gradient descent algorithm optimizer = torch.optim.SGD( model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0002 ) # If a trained model already exists, give up the training part if load_if_available and os.path.isfile(load_path): print("Found already trained model...") model = torch.load(load_path) print("... loaded!") else: print("Training the model...") # Use a pretty progress bar to show updates for epoch in tnrange(epochs, desc="Training Progress"): # Print loss results total_epoch_loss = 0 # Adjust the learning rate adjust_learning_rate(optimizer, epoch, learning_rate, long_training) for _, (images, labels) in enumerate(tqdm(trainSetLoader, desc="Batches")): # Cast to proper tensors images, labels = images.to(device), labels.to(device) # Clean the gradients optimizer.zero_grad() # Predict logits = model(images) # Calculate loss loss = loss_function(logits, labels) # Gradient descent loss.backward() # Add total accumulated loss total_epoch_loss += loss.item() # Also clip the gradients (ReLU leads to vanishing or # exploding gradients) torch.nn.utils.clip_grad_norm_(model.parameters(), 10) optimizer.step() print("Loss at epoch {} is {}".format(epoch, total_epoch_loss)) print("... done!") # Make sure the model is in eval mode before returning model.eval() return model
2.8125
3
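The manual adjust_learning_rate() in the training script above implements a step decay (0.1, then 0.01, then 0.001). PyTorch ships the same schedule as torch.optim.lr_scheduler.MultiStepLR; a minimal equivalent sketch for the long-training case, with a stand-in model:

import torch
import torch.nn as nn

model = nn.Linear(10, 2)  # stand-in for ResNet18
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9,
                            weight_decay=0.0002)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[100, 150], gamma=0.1)

for epoch in range(200):
    # ... one training epoch over the loader would go here ...
    optimizer.step()   # placeholder so the optimizer/scheduler step order is valid
    scheduler.step()   # decays lr by 10x at epochs 100 and 150

print(optimizer.param_groups[0]["lr"])  # ~0.001 after 200 epochs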
src/romt/manifest.py
hehaoqian/romt
29
11534
<reponame>hehaoqian/romt<filename>src/romt/manifest.py
#!/usr/bin/env python3
# coding=utf-8

import copy
from pathlib import Path
from typing import (
    Any,
    Generator,
    Iterable,
    List,
    MutableMapping,
    Optional,
)

import toml

from romt import error


def target_matches_any(target: str, expected_targets: Iterable[str]) -> bool:
    if target == "*":
        return True
    for expected in expected_targets:
        if target == expected or expected == "*":
            return True
    return False


class Package:
    def __init__(
        self, name: str, target: str, details: MutableMapping[str, Any]
    ):
        self.name = name
        self.target = target
        self.available = details["available"]
        self.xz_url = details.get("xz_url", "")

    @property
    def has_rel_path(self) -> bool:
        return self.xz_url != ""

    @property
    def rel_path(self) -> str:
        if not self.has_rel_path:
            raise ValueError(
                "Package {}/{} missing xz_url".format(self.name, self.target)
            )
        url = self.xz_url
        prefix = "/dist/"
        return url[url.index(prefix) + len(prefix) :]


class Manifest:
    def __init__(self, raw_dict: MutableMapping[str, Any]):
        self._dict = raw_dict

    @staticmethod
    def from_toml_path(toml_path: Path) -> "Manifest":
        return Manifest(toml.load(toml_path))

    def clone(self) -> "Manifest":
        return Manifest(copy.deepcopy(self._dict))

    @property
    def _rust_src_version(self) -> str:
        version = self._dict["pkg"]["rust-src"]["version"]
        # Sample version lines found below [pkg.rust-src]:
        #   version = "1.43.0-beta.5 (934ae7739 2020-04-06)"
        #   version = "1.44.0-nightly (42abbd887 2020-04-07)"
        #   version = "1.42.0 (b8cedc004 2020-03-09)"
        return version

    @property
    def channel(self) -> str:
        version = self._rust_src_version
        if "-beta" in version:
            channel = "beta"
        elif "-nightly" in version:
            channel = "nightly"
        else:
            channel = "stable"
        return channel

    @property
    def version(self) -> str:
        version = self._rust_src_version
        #   version = "1.44.0-nightly (42abbd887 2020-04-07)"
        #   version = "1.42.0 (b8cedc004 2020-03-09)"
        return version.split("-")[0].split()[0]

    @property
    def date(self) -> str:
        return self._dict["date"]

    @property
    def spec(self) -> str:
        return "{}-{}".format(self.channel, self.date)

    @property
    def ident(self) -> str:
        return "{}({})".format(self.spec, self.version)

    def set_package_available(
        self, package_name: str, target: str, available: bool = True
    ) -> None:
        details = self._dict["pkg"][package_name]["target"][target]
        if available and "xz_url" not in details:
            raise error.AbortError(
                "package {}/{} set available but missing xz_url".format(
                    package_name, target
                )
            )
        details["available"] = available

    def get_package(self, package_name: str, target: str) -> Package:
        details = self._dict["pkg"][package_name]["target"][target]
        return Package(package_name, target, details)

    def gen_packages(self) -> Generator[Package, None, None]:
        """Generate Package for all (name, target) in manifest."""
        for name, package_dict in self._dict["pkg"].items():
            for target in package_dict["target"].keys():
                yield self.get_package(name, target)

    def gen_available_packages(
        self, *, targets: Optional[Iterable[str]] = None
    ) -> Generator[Package, None, None]:
        """gen_packages() for available packages matching targets."""
        for package in self.gen_packages():
            if package.available:
                if targets is None or target_matches_any(
                    package.target, targets
                ):
                    yield package

    def available_packages(self) -> List[Package]:
        return list(self.gen_available_packages())

    def _targets_from_packages(self, packages: Iterable[Package]) -> List[str]:
        targets = set(p.target for p in packages)
        targets.discard("*")
        return sorted(targets)

    def all_targets(self) -> List[str]:
        return self._targets_from_packages(self.gen_packages())

    def available_targets(self) -> List[str]:
        return self._targets_from_packages(self.gen_available_packages())
2.296875
2
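A quick demonstration of the rel_path extraction used by Package above: it keeps everything after the first "/dist/" in the xz_url. The URL here is illustrative:

url = ("https://static.rust-lang.org/dist/2020-04-07/"
       "rust-std-1.44.0-nightly-x86_64-unknown-linux-gnu.tar.xz")
prefix = "/dist/"
rel_path = url[url.index(prefix) + len(prefix):]
print(rel_path)  # 2020-04-07/rust-std-1.44.0-nightly-x86_64-unknown-linux-gnu.tar.xz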
examples/unread_sms.py
MikeRixWolfe/pygooglevoice
1
11535
<reponame>MikeRixWolfe/pygooglevoice
from googlevoice import Voice

voice = Voice()
voice.login()

for message in voice.sms().messages:
    #if not message.isRead:
    print(message.id, message.phoneNumber, message.messageText)
    #message.mark(1)
2.53125
3
libra/handlers/user.py
pitomba/libra
2
11536
# coding: utf-8
from tornado.web import RequestHandler

from libra.handlers.base import authenticated


class UserHandler(RequestHandler):
    @authenticated
    def post(self, user, **kwargs):
        self.write({"msg": "Success"})
2.078125
2
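The authenticated decorator imported from libra.handlers.base is not part of this record. Hypothetically, given that the handler receives a user argument, it resolves a user (e.g. from a token header) and injects it into the method; a purely illustrative sketch with a stub lookup:

import functools

def lookup_user(token):
    # Stub for illustration only; the real project would query a session store.
    return {"id": 1} if token else None

def authenticated(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        token = self.request.headers.get("Authorization")
        user = lookup_user(token)
        if user is None:
            self.set_status(401)
            return self.write({"msg": "Unauthorized"})
        return method(self, user, *args, **kwargs)
    return wrapper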
plextraktsync/commands/unmatched.py
RoyXiang/PlexTraktSync
0
11537
<filename>plextraktsync/commands/unmatched.py
import click

from plextraktsync.commands.login import ensure_login
from plextraktsync.factory import factory
from plextraktsync.walker import WalkConfig, Walker


def unmatched(no_progress_bar: bool, local: bool):
    config = factory.run_config().update(progressbar=not no_progress_bar)
    ensure_login()
    plex = factory.plex_api()
    trakt = factory.trakt_api()
    mf = factory.media_factory()
    pb = factory.progressbar(config.progressbar)
    wc = WalkConfig()
    walker = Walker(plex, trakt, mf, wc, progressbar=pb)

    if not wc.is_valid():
        click.echo("Nothing to scan, this is likely due to conflicting options given.")
        return

    failed = []
    if local:
        for pm in walker.get_plex_movies():
            if pm.guids[0].provider == 'local':
                failed.append(pm)
    else:
        for pm in walker.get_plex_movies():
            movie = mf.resolve_any(pm)
            if not movie:
                failed.append(pm)

    for pm in failed:
        p = pm.item
        url = plex.media_url(pm)
        print("=" * 80)
        print(f"No match: {pm}")
        print(f"URL: {url}")
        print(f"Title: {p.title}")
        print(f"Year: {p.year}")
        print(f"Updated At: {p.updatedAt}")
        for l in p.locations:
            print(f"Location: {l}")
        print("")
2.609375
3
montepython/likelihoods/covfefe/__init__.py
Maquiner/Monte_Python_2CCL
1
11538
import os
import numpy as np
from montepython.likelihood_class import Likelihood
import montepython.io_mp as io_mp
import warnings
import ccl_tools as tools
import pyccl as ccl


class covfefe(Likelihood):

    # initialization routine
    def __init__(self, path, data, command_line):
        Likelihood.__init__(self, path, data, command_line)

        self.nb = data.cosmo_arguments['n_bins']
        self.cm = data.cosmo_arguments['cov']
        n_sims = 20000

        # Load covariance matrix
        fn = 'cov_{}_{}.npz'.format(self.cm, self.nb)
        self.cov = np.load(os.path.join(self.data_directory, fn))['arr_0']
        if self.cm == 'sim':
            factor = (n_sims - self.cov.shape[0] - 2.) / (n_sims - 1.)
        else:
            factor = 1.
        self.icov = factor * np.linalg.inv(self.cov)

        # Load ell bandpowers
        self.ell_bp = np.load(os.path.join(self.data_directory, 'ell_bp.npz'))['lsims'].astype(int)
        self.nl = len(self.ell_bp)

        # Load photo_z
        fn = 'z_{}.npz'.format(self.nb)
        self.z = np.load(os.path.join(self.data_directory, fn))['arr_0']
        fn = 'pz_{}.npz'.format(self.nb)
        self.pz = np.load(os.path.join(self.data_directory, fn))['arr_0']
        fn = 'bz_{}.npz'.format(self.nb)
        self.bz = np.load(os.path.join(self.data_directory, fn))['arr_0']

        # Load data
        fn = 'data_{}.npz'.format(self.nb)
        self.data = np.load(os.path.join(self.data_directory, fn))['arr_0']
        # end of initialization

    # compute likelihood
    def loglkl(self, cosmo, data):
        # Get theory Cls
        cosmo_ccl = tools.get_cosmo_ccl(cosmo.pars)
        tracers = tools.get_tracers_ccl(cosmo_ccl, self.z, self.pz, self.bz)
        theory = tools.get_cls_ccl(cosmo_ccl, tracers, self.ell_bp)

        # Get chi2
        chi2 = (self.data - theory).dot(self.icov).dot(self.data - theory)
        lkl = -0.5 * chi2

        return lkl
2.1875
2
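The factor (n_sims - p - 2) / (n_sims - 1) applied to the inverse covariance above is the Hartlap correction, which debiases a precision matrix estimated from a finite number of simulations. A standalone numpy sketch of the Gaussian chi-squared it feeds into, with toy data:

import numpy as np

rng = np.random.default_rng(0)
p = 4                   # length of the data vector
n_sims = 20000
cov = np.diag(rng.uniform(0.5, 2.0, p))  # toy diagonal covariance
icov = ((n_sims - p - 2.0) / (n_sims - 1.0)) * np.linalg.inv(cov)

data = rng.normal(size=p)
theory = np.zeros(p)
resid = data - theory
chi2 = resid @ icov @ resid
loglkl = -0.5 * chi2
print(chi2, loglkl)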
tests/integration/web3/conftest.py
cducrest/eth-tester-rpc
3
11539
<filename>tests/integration/web3/conftest.py
from eth_utils import (
    to_bytes,
)
from eth_utils.toolz import (
    identity,
)
import pytest

from web3._utils.module_testing.emitter_contract import (
    CONTRACT_EMITTER_ABI,
    CONTRACT_EMITTER_CODE,
)
from web3._utils.module_testing.math_contract import (
    MATH_ABI,
    MATH_BYTECODE,
)
from web3._utils.module_testing.revert_contract import (
    _REVERT_CONTRACT_ABI,
    REVERT_CONTRACT_BYTECODE,
)


@pytest.fixture(scope="module", params=[lambda x: to_bytes(hexstr=x), identity])
def address_conversion_func(request):
    return request.param


@pytest.fixture(scope="module")
def math_contract_factory(web3):
    contract_factory = web3.eth.contract(abi=MATH_ABI, bytecode=MATH_BYTECODE)
    return contract_factory


@pytest.fixture(scope="module")
def emitter_contract_factory(web3):
    contract_factory = web3.eth.contract(abi=CONTRACT_EMITTER_ABI, bytecode=CONTRACT_EMITTER_CODE)
    return contract_factory


@pytest.fixture(scope="module")
def revert_contract_factory(web3):
    contract_factory = web3.eth.contract(
        abi=_REVERT_CONTRACT_ABI, bytecode=REVERT_CONTRACT_BYTECODE
    )
    return contract_factory
1.632813
2
planning/domains/depth/depthGenerator.py
xlbandy/fape
14
11540
from __future__ import division

import itertools
import json
import math
import os
import random
import shutil
import subprocess
import sys

durationA = str(5)
durationB = str(4)
durationC = str(1)


def main():
    if len(sys.argv) > 1:
        nbDepth = int(sys.argv[1])
        if nbDepth < 2:
            nbDepth = 2
    else:
        nbDepth = 2

    mainFolder = "depth"
    if not os.path.exists(mainFolder):
        subprocess.call(["mkdir", mainFolder])
    generateDomain("depth", nbDepth)
    #print "Every file has been written. Exiting"


def generateDomain(folderName, nbDepth):
    domainFilename = folderName + "/" + folderName + "-flat" + str(nbDepth) + ".dom.anml"
    printDomainToFile(domainFilename, nbDepth)
    domainFilename = folderName + "/" + folderName + "-hier" + str(nbDepth) + ".dom.anml"
    printDomainHierToFile(domainFilename, nbDepth)


def printDomainToFile(domainFilename, nbDepth):
    with open(domainFilename, "w") as f:
        for i in range(0, nbDepth):
            f.write("predicate a" + str(i+1) + "();\n")
            f.write("predicate b" + str(i+1) + "();\n")
            f.write("predicate c" + str(i+1) + "();\n")
            f.write("predicate d" + str(i+1) + "();\n")
            f.write("predicate e" + str(i+1) + "();\n")

            f.write("\naction An" + str(i+1) + " () {\n")
            f.write("\tduration := " + durationA + ";\n")
            if i > 0:
                f.write("\t[start] {\n")
                f.write("\t\tb" + str(i) + " == true;\n")
                f.write("\t\td" + str(i) + " == true;\n")
                f.write("\t\te" + str(i) + " == true;\n")
                f.write("\t};\n")
            f.write("\t[start] a" + str(i+1) + " := true;\n")
            f.write("\t[end] {\n")
            f.write("\t\ta" + str(i+1) + " := false;\n")
            f.write("\t\tb" + str(i+1) + " := true;\n")
            f.write("\t\td" + str(i+1) + " := false;\n")
            f.write("\t};\n")
            f.write("};\n")

            f.write("\naction Bn" + str(i+1) + " () {\n")
            f.write("\tduration := " + durationB + ";\n")
            f.write("\t[start] a" + str(i+1) + " == true;\n")
            f.write("\t[start] c" + str(i+1) + " := true;\n")
            f.write("\t[end] {\n")
            f.write("\t\tc" + str(i+1) + " := false;\n")
            f.write("\t\td" + str(i+1) + " := true;\n")
            f.write("\t};\n")
            f.write("};\n")

            f.write("\naction Cn" + str(i+1) + " () {\n")
            f.write("\tduration := " + durationC + ";\n")
            f.write("\t[start] c" + str(i+1) + " == true;\n")
            f.write("\t[end] {\n")
            f.write("\t\tb" + str(i+1) + " := false;\n")
            f.write("\t\te" + str(i+1) + " := true;\n")
            f.write("\t};\n")
            f.write("};\n")

        ######################## problem ###############
        f.write("\n/*******Problem************/\n")
        f.write("[all] contains{\n")
        f.write("\tCn" + str(nbDepth) + "();\n")
        f.write("};")


def printDomainHierToFile(domainFilename, nbDepth):
    with open(domainFilename, "w") as f:
        for i in range(0, nbDepth):
            if i == 0:
                f.write("\naction An" + str(i+1) + " () {\n")
                f.write("\tmotivated;\n")
                f.write("\tduration := " + durationA + ";\n")
                f.write("};\n")
            else:
                f.write("\naction An" + str(i+1) + " () {\n")
                f.write("\tmotivated;\n")
                f.write("\tduration := " + durationA + ";\n")
                f.write("\ta : ABC" + str(i) + "();\n")
                f.write("\t end(a) < start;\n")
                f.write("};\n")

            f.write("\naction Bn" + str(i+1) + " () {\n")
            f.write("\tduration := " + durationB + ";\n")
            f.write("\tmotivated;\n")
            f.write("};\n")

            f.write("\naction Cn" + str(i+1) + " () {\n")
            f.write("\tduration := " + durationC + ";\n")
            f.write("\tmotivated;\n")
            f.write("};\n")

            f.write("\naction ABC" + str(i+1) + " () {\n")
            f.write("\t[all] contains {\n")
            f.write("\t\t b" + str(i+1) + " : An" + str(i+1) + "();\n")
            f.write("\t\t d" + str(i+1) + " : Bn" + str(i+1) + "();\n")
            f.write("\t\t e" + str(i+1) + " : Cn" + str(i+1) + "();\n")
            f.write("\t};\n")
            f.write("\tstart(b" + str(i+1) + ") < start(d" + str(i+1) + ");\n")
            f.write("\tend(d" + str(i+1) + ") < end(b" + str(i+1) + ");\n")
            f.write("\tstart(d" + str(i+1) + ") < start(e" + str(i+1) + ");\n")
            f.write("\tend(e" + str(i+1) + ") < end(d" + str(i+1) + ");\n")
            f.write("};\n")

        #################### problem #############
        f.write("\n/*******Problem************/\n")
        f.write("[all] contains{\n")
        f.write("\tCn" + str(nbDepth) + "();\n")
        f.write("};")


if __name__ == "__main__":
    main()
2.4375
2
idc/settings.py
fedorov/IDC-WebApp
0
11541
### # Copyright 2015-2020, Institute for Systems Biology # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ### from __future__ import print_function from builtins import str from builtins import object import os import re import datetime from os.path import join, dirname, exists import sys import dotenv from socket import gethostname, gethostbyname SECURE_LOCAL_PATH = os.environ.get('SECURE_LOCAL_PATH', '') if not exists(join(dirname(__file__), '../{}.env'.format(SECURE_LOCAL_PATH))): print("[ERROR] Couldn't open .env file expected at {}!".format( join(dirname(__file__), '../{}.env'.format(SECURE_LOCAL_PATH))) ) print("[ERROR] Exiting settings.py load - check your Pycharm settings and secure_path.env file.") exit(1) dotenv.read_dotenv(join(dirname(__file__), '../{}.env'.format(SECURE_LOCAL_PATH))) APP_ENGINE_FLEX = 'aef-' APP_ENGINE = 'Google App Engine/' BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) + os.sep SHARED_SOURCE_DIRECTORIES = [ 'IDC-Common' ] # Add the shared Django application subdirectory to the Python module search path for directory_name in SHARED_SOURCE_DIRECTORIES: sys.path.append(os.path.join(BASE_DIR, directory_name)) DEBUG = (os.environ.get('DEBUG', 'False') == 'True') CONNECTION_IS_LOCAL = (os.environ.get('DATABASE_HOST', '127.0.0.1') == 'localhost') IS_CIRCLE = (os.environ.get('CI', None) is not None) DEBUG_TOOLBAR = ((os.environ.get('DEBUG_TOOLBAR', 'False') == 'True') and CONNECTION_IS_LOCAL) IMG_QUOTA = os.environ.get('IMG_QUOTA', '137') print("[STATUS] DEBUG mode is {}".format(str(DEBUG)), file=sys.stdout) RESTRICT_ACCESS = (os.environ.get('RESTRICT_ACCESS', 'True') == 'True') RESTRICTED_ACCESS_GROUPS = os.environ.get('RESTRICTED_ACCESS_GROUPS', '').split(',') if RESTRICT_ACCESS: print("[STATUS] Access to the site is restricted to members of the {} group(s).".format(", ".join(RESTRICTED_ACCESS_GROUPS)), file=sys.stdout) else: print("[STATUS] Access to the site is NOT restricted!", file=sys.stdout) # Theoretically Nginx allows us to use '*' for ALLOWED_HOSTS but... 
ALLOWED_HOSTS = list(set(os.environ.get('ALLOWED_HOST', 'localhost').split(',') + ['localhost', '127.0.0.1', '[::1]', gethostname(), gethostbyname(gethostname()),])) #ALLOWED_HOSTS = ['*'] SSL_DIR = os.path.abspath(os.path.dirname(__file__))+os.sep ADMINS = () MANAGERS = ADMINS GCLOUD_PROJECT_ID = os.environ.get('GCLOUD_PROJECT_ID', '') GCLOUD_PROJECT_NUMBER = os.environ.get('GCLOUD_PROJECT_NUMBER', '') BIGQUERY_PROJECT_ID = os.environ.get('BIGQUERY_PROJECT_ID', GCLOUD_PROJECT_ID) BIGQUERY_DATA_PROJECT_ID = os.environ.get('BIGQUERY_DATA_PROJECT_ID', GCLOUD_PROJECT_ID) # Deployment module CRON_MODULE = os.environ.get('CRON_MODULE') # Log Names WEBAPP_LOGIN_LOG_NAME = os.environ.get('WEBAPP_LOGIN_LOG_NAME', 'local_dev_logging') BASE_URL = os.environ.get('BASE_URL', 'https://idc-dev.appspot.com') BASE_API_URL = os.environ.get('BASE_API_URL', 'https://api-dot-idc-dev.appspot.com') API_HOST = os.environ.get('API_HOST', 'api-dot-idc-dev.appspot.com') # Compute services - Should not be necessary in webapp PAIRWISE_SERVICE_URL = os.environ.get('PAIRWISE_SERVICE_URL', None) # Data Buckets GCLOUD_BUCKET = os.environ.get('GOOGLE_STORAGE_BUCKET') # BigQuery cohort storage settings BIGQUERY_COHORT_DATASET_ID = os.environ.get('BIGQUERY_COHORT_DATASET_ID', 'cohort_dataset') BIGQUERY_COHORT_TABLE_ID = os.environ.get('BIGQUERY_COHORT_TABLE_ID', 'developer_cohorts') BIGQUERY_IDC_TABLE_ID = os.environ.get('BIGQUERY_IDC_TABLE_ID', '') MAX_BQ_INSERT = int(os.environ.get('MAX_BQ_INSERT', '500')) USER_DATA_ON = bool(os.environ.get('USER_DATA_ON', 'False') == 'True') database_config = { 'default': { 'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.mysql'), 'HOST': os.environ.get('DATABASE_HOST', '127.0.0.1'), 'NAME': os.environ.get('DATABASE_NAME', 'dev'), 'USER': os.environ.get('DATABASE_USER', 'django-user'), 'PASSWORD': os.<PASSWORD>('DATABASE_PASSWORD') } } # On the build system, we need to use build-system specific database information if os.environ.get('CI', None) is not None: database_config = { 'default': { 'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.mysql'), 'HOST': os.environ.get('DATABASE_HOST_BUILD', '127.0.0.1'), 'NAME': os.environ.get('DATABASE_NAME_BUILD', ''), 'PORT': 3306, 'USER': os.environ.get('DATABASE_USER_BUILD'), 'PASSWORD': os.environ.get('MYSQL_ROOT_PASSWORD_BUILD') } } DATABASES = database_config DB_SOCKET = database_config['default']['HOST'] if 'cloudsql' in database_config['default']['HOST'] else None IS_DEV = (os.environ.get('IS_DEV', 'False') == 'True') IS_APP_ENGINE_FLEX = os.getenv('GAE_INSTANCE', '').startswith(APP_ENGINE_FLEX) IS_APP_ENGINE = os.getenv('SERVER_SOFTWARE', '').startswith(APP_ENGINE) VERSION = "{}.{}".format("local-dev", datetime.datetime.now().strftime('%Y%m%d%H%M')) if exists(join(dirname(__file__), '../version.env')): dotenv.read_dotenv(join(dirname(__file__), '../version.env')) else: if IS_DEV: import git repo = git.Repo(path="/home/vagrant/www/",search_parent_directories=True) VERSION = "{}.{}.{}".format("local-dev", datetime.datetime.now().strftime('%Y%m%d%H%M'), str(repo.head.object.hexsha)[-6:]) APP_VERSION = os.environ.get("APP_VERSION", VERSION) DEV_TIER = bool(DEBUG or re.search(r'^dev\.',APP_VERSION)) # If this is a GAE-Flex deployment, we don't need to specify SSL; the proxy will take # care of that for us if 'DB_SSL_CERT' in os.environ and not IS_APP_ENGINE_FLEX: DATABASES['default']['OPTIONS'] = { 'ssl': { 'ca': os.environ.get('DB_SSL_CA'), 'cert': os.environ.get('DB_SSL_CERT'), 'key': os.environ.get('DB_SSL_KEY') } 
} # Default to localhost for the site ID SITE_ID = 2 if IS_APP_ENGINE_FLEX or IS_APP_ENGINE: print("[STATUS] AppEngine Flex detected.", file=sys.stdout) SITE_ID = 3 def get_project_identifier(): return BIGQUERY_PROJECT_ID # Set cohort table here if BIGQUERY_COHORT_TABLE_ID is None: raise Exception("Developer-specific cohort table ID is not set.") BQ_MAX_ATTEMPTS = int(os.environ.get('BQ_MAX_ATTEMPTS', '10')) API_USER = os.environ.get('API_USER', 'api_user') API_AUTH_KEY = os.environ.get('API_AUTH_KEY', 'Token') # TODO Remove duplicate class. # # This class is retained here, as it is required by bq_data_access/v1. # bq_data_access/v2 uses the class from the bq_data_access/bigquery_cohorts module. class BigQueryCohortStorageSettings(object): def __init__(self, dataset_id, table_id): self.dataset_id = dataset_id self.table_id = table_id def GET_BQ_COHORT_SETTINGS(): return BigQueryCohortStorageSettings(BIGQUERY_COHORT_DATASET_ID, BIGQUERY_COHORT_TABLE_ID) USE_CLOUD_STORAGE = bool(os.environ.get('USE_CLOUD_STORAGE', 'False') == 'True') SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') CSRF_COOKIE_SECURE = bool(os.environ.get('CSRF_COOKIE_SECURE', 'True') == 'True') SESSION_COOKIE_SECURE = bool(os.environ.get('SESSION_COOKIE_SECURE', 'True') == 'True') SECURE_SSL_REDIRECT = bool(os.environ.get('SECURE_SSL_REDIRECT', 'True') == 'True') SECURE_REDIRECT_EXEMPT = [] if SECURE_SSL_REDIRECT: # Exempt the health check so it can go through SECURE_REDIRECT_EXEMPT = [r'^_ah/(vm_)?health$', ] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Los_Angeles' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_FOLDER = os.environ.get('MEDIA_FOLDER', 'uploads/') MEDIA_ROOT = os.path.join(os.path.dirname(__file__), '..', '..', MEDIA_FOLDER) MEDIA_ROOT = os.path.normpath(MEDIA_ROOT) # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = 'static_collex' # URL prefix for static files. 
# Example: "http://media.lawrence.com/static/" STATIC_URL = os.environ.get('STATIC_URL', '/static/') GCS_STORAGE_URI = os.environ.get('GCS_STORAGE_URI', 'https://storage.googleapis.com/') # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(BASE_DIR, 'static'), ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '') SECURE_HSTS_INCLUDE_SUBDOMAINS = (os.environ.get('SECURE_HSTS_INCLUDE_SUBDOMAINS','True') == 'True') SECURE_HSTS_PRELOAD = (os.environ.get('SECURE_HSTS_PRELOAD','True') == 'True') SECURE_HSTS_SECONDS = int(os.environ.get('SECURE_HSTS_SECONDS','3600')) MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'idc.checkreqsize_middleware.CheckReqSize', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'adminrestrict.middleware.AdminPagesRestrictMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'idc.team_only_middleware.TeamOnly', # Uncomment the next line for simple clickjacking protection: 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'offline.middleware.OfflineMiddleware', ] ROOT_URLCONF = 'idc.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'idc.wsgi.application' INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', 'django.contrib.admindocs', 'anymail', 'idc', 'data_upload', 'sharing', 'cohorts', 'idc_collections', 'offline', 'adminrestrict' ) ############################# # django-session-security # ############################# INSTALLED_APPS += ('session_security',) SESSION_SECURITY_WARN_AFTER = int(os.environ.get('SESSION_SECURITY_WARN_AFTER','540')) SESSION_SECURITY_EXPIRE_AFTER = int(os.environ.get('SESSION_SECURITY_EXPIRE_AFTER','600')) SESSION_EXPIRE_AT_BROWSER_CLOSE = True MIDDLEWARE.append( # for django-session-security -- must go *after* AuthenticationMiddleware 'session_security.middleware.SessionSecurityMiddleware', ) ############################### # End django-session-security # ############################### TEST_RUNNER = 'django.test.runner.DiscoverRunner' # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' }, 'require_debug_true': { '()': 'django.utils.log.RequireDebugTrue' }, }, 'formatters': { 'verbose': { 'format': '[%(levelname)s] @%(asctime)s in %(module)s/%(process)d/%(thread)d - %(message)s' }, 'simple': { 'format': '[%(levelname)s] @%(asctime)s in %(module)s: %(message)s' }, }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'console_dev': { 'level': 'DEBUG', 'filters': ['require_debug_true'], 'class': 'logging.StreamHandler', 'formatter': 'verbose', }, 'console_prod': { 'level': 'DEBUG', 'filters': ['require_debug_false'], 'class': 'logging.StreamHandler', 'formatter': 'simple', }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, 'main_logger': { 'handlers': ['console_dev', 'console_prod'], 'level': 'DEBUG', 'propagate': True, }, 'allauth': { 'handlers': ['console_dev', 'console_prod'], 'level': 'DEBUG', 'propagate': True, }, 'google_helpers': { 'handlers': ['console_dev', 'console_prod'], 'level': 'DEBUG', 'propagate': True, }, 'data_upload': { 'handlers': ['console_dev', 'console_prod'], 'level': 'DEBUG', 'propagate': True, }, }, } # Force allauth to only use https ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https' # ...but not if this is a local dev build if IS_DEV: ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'http' ########################## # Start django-allauth # ########################## LOGIN_REDIRECT_URL = '/extended_login/' INSTALLED_APPS += ( 'accounts', 'allauth', 'allauth.account', 'allauth.socialaccount', 'allauth.socialaccount.providers.google', 'rest_framework.authtoken' ) # Template Engine Settings TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', # add any necessary template paths here 'DIRS': [ os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR, 'templates', 'accounts'), ], 'APP_DIRS': True, 'OPTIONS': { # add any context processors here 'context_processors': ( 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django.template.context_processors.tz', 'finalware.context_processors.contextify', 'idc.context_processor.additional_context', ), # add any loaders here; if using the defaults, we can comment it out # 'loaders': ( # 'django.template.loaders.filesystem.Loader', # 'django.template.loaders.app_directories.Loader' # ), 'debug': DEBUG, }, }, ] AUTHENTICATION_BACKENDS = ( # Needed to login by username in Django admin, regardless of `allauth` "django.contrib.auth.backends.ModelBackend", # `allauth` specific authentication methods, such as login by e-mail "allauth.account.auth_backends.AuthenticationBackend", ) SOCIALACCOUNT_PROVIDERS = \ { 'google': { 'SCOPE': ['profile', 'email'], 'AUTH_PARAMS': { 'access_type': 'online' } } } ACCOUNT_AUTHENTICATION_METHOD = "email" ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_USERNAME_REQUIRED = bool(os.environ.get('ACCOUNT_USERNAME_REQUIRED', 'False') == 'True') ACCOUNT_EMAIL_VERIFICATION = os.environ.get('ACCOUNT_EMAIL_VERIFICATION', 'mandatory').lower() ACCOUNT_EMAIL_SUBJECT_PREFIX = "[Imaging Data Commons] " ACCOUNTS_PASSWORD_EXPIRATION = os.environ.get('ACCOUNTS_PASSWORD_EXPIRATION',120) # Max password age in days ACCOUNTS_PASSWORD_HISTORY = os.environ.get('ACCOUNTS_PASSWORD_HISTORY', 5) # Max password history kept ACCOUNTS_ALLOWANCES = 
list(set(os.environ.get('ACCOUNTS_ALLOWANCES','').split(','))) ########################## # End django-allauth # ########################## ########################## # Django local auth # ########################## AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': { 'min_length': 16, } }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'idc.validators.PasswordComplexityValidator', 'OPTIONS': { 'min_length': 16, 'special_char_list': '!@#$%^&*+:;?' } }, { 'NAME': 'idc.validators.PasswordReuseValidator' } ] ######################################### # MailGun Email Settings for requests # ######################################### # # These settings allow use of MailGun as a simple API call EMAIL_SERVICE_API_URL = os.environ.get('EMAIL_SERVICE_API_URL', '') EMAIL_SERVICE_API_KEY = os.environ.get('EMAIL_SERVICE_API_KEY', '') NOTIFICATION_EMAIL_FROM_ADDRESS = os.environ.get('NOTIFICATOON_EMAIL_FROM_ADDRESS', '<EMAIL>') ######################### # django-anymail # ######################### # # Anymail lets us use the Django mail system with mailgun (eg. in local account email verification) ANYMAIL = { "MAILGUN_API_KEY": EMAIL_SERVICE_API_KEY, "MAILGUN_SENDER_DOMAIN": 'mg.canceridc.dev', # your Mailgun domain, if needed } EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend" DEFAULT_FROM_EMAIL = NOTIFICATION_EMAIL_FROM_ADDRESS SERVER_EMAIL = "<EMAIL>" GOOGLE_APPLICATION_CREDENTIALS = join(dirname(__file__), '../{}{}'.format(SECURE_LOCAL_PATH,os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''))) OAUTH2_CLIENT_ID = os.environ.get('OAUTH2_CLIENT_ID', '') OAUTH2_CLIENT_SECRET = os.environ.get('OAUTH2_CLIENT_SECRET', '') if not exists(GOOGLE_APPLICATION_CREDENTIALS): print("[ERROR] Google application credentials file wasn't found! Provided path: {}".format(GOOGLE_APPLICATION_CREDENTIALS)) exit(1) ################################# # For NIH/eRA Commons login # ################################# GOOGLE_GROUP_ADMIN = os.environ.get('GOOGLE_GROUP_ADMIN', '') SUPERADMIN_FOR_REPORTS = os.environ.get('SUPERADMIN_FOR_REPORTS', '') ############################## # Start django-finalware # ############################## # # This should only be done on a local system which is running against its own VM, or during CircleCI testing. # Deployed systems will already have a site superuser so this would simply overwrite that user. # NEVER ENABLE this in production! # if (IS_DEV and CONNECTION_IS_LOCAL) or IS_CIRCLE: INSTALLED_APPS += ( 'finalware',) SITE_SUPERUSER_USERNAME = os.environ.get('SUPERUSER_USERNAME', '') SITE_SUPERUSER_EMAIL = '' SITE_SUPERUSER_PASSWORD = <PASSWORD>('<PASSWORD>') # ############################ # End django-finalware # ############################ CONN_MAX_AGE = 60 ############################ # CUSTOM TEMPLATE CONTEXT ############################ ############################ # METRICS SETTINGS ############################ SITE_GOOGLE_ANALYTICS = bool(os.environ.get('SITE_GOOGLE_ANALYTICS_TRACKING_ID', None) is not None) SITE_GOOGLE_ANALYTICS_TRACKING_ID = os.environ.get('SITE_GOOGLE_ANALYTICS_TRACKING_ID', '') ############################################################## # MAXes to prevent size-limited events from causing errors ############################################################## # Google App Engine has a response size limit of 32M. ~65k entries from the cohort_filelist view will # equal just under the 32M limit. 
If each individual listing is ever lengthened or shortened this # number should be adjusted MAX_FILE_LIST_REQUEST = 65000 MAX_BQ_RECORD_RESULT = int(os.environ.get('MAX_BQ_RECORD_RESULT', '5000')) # Rough max file size to allow for eg. barcode list upload, to prevent triggering RequestDataTooBig FILE_SIZE_UPLOAD_MAX = 1950000 ################################# # DICOM Viewer settings ################################# DICOM_VIEWER = os.environ.get('DICOM_VIEWER', None) ################################# # SOLR settings ################################# SOLR_URI = os.environ.get('SOLR_URI', '') SOLR_LOGIN = os.environ.get('SOLR_LOGIN', '') SOLR_PASSWORD = os.environ.get('SOLR_PASSWORD', '') SOLR_CERT = join(dirname(dirname(__file__)), "{}{}".format(SECURE_LOCAL_PATH, os.environ.get('SOLR_CERT', ''))) DEFAULT_FETCH_COUNT = os.environ.get('DEFAULT_FETCH_COUNT', 10) # Explicitly check for known problems in descrpitions and names provided by users BLACKLIST_RE = r'((?i)<script>|(?i)</script>|!\[\]|!!\[\]|\[\]\[\".*\"\]|(?i)<iframe>|(?i)</iframe>)' if DEBUG and DEBUG_TOOLBAR: INSTALLED_APPS += ('debug_toolbar',) MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware',) DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', ] SHOW_TOOLBAR_CALLBACK = True INTERNAL_IPS = (os.environ.get('INTERNAL_IP', ''),) ################## # OHIF_SETTINGS ################## # # default is to add trailing '/' to urls ie /callback becomes /callback/. Ohif does not like /callback/ ! APPEND_SLASH = False DICOM_STORE_PATH=os.environ.get('DICOM_STORE_PATH','') # Log the version of our app print("[STATUS] Application Version is {}".format(APP_VERSION))
1.984375
2
examples/fixed_play.py
wwxFromTju/malib
6
11542
<gh_stars>1-10 # Created by yingwen at 2019-03-16 from multiprocessing import Process from malib.agents.agent_factory import * from malib.environments import DifferentialGame from malib.logger.utils import set_logger from malib.samplers.sampler import MASampler from malib.trainers import MATrainer from malib.utils.random import set_seed def get_agent_by_type(type_name, i, env, hidden_layer_sizes, max_replay_buffer_size): if type_name == "SAC": return get_sac_agent( env, hidden_layer_sizes=hidden_layer_sizes, max_replay_buffer_size=max_replay_buffer_size, ) elif type_name == "ROMMEO": return get_rommeo_agent( env, agent_id=i, hidden_layer_sizes=hidden_layer_sizes, max_replay_buffer_size=max_replay_buffer_size, ) elif type_name == "ROMMEO-UNI": return get_rommeo_agent( env, agent_id=i, hidden_layer_sizes=hidden_layer_sizes, max_replay_buffer_size=max_replay_buffer_size, uniform=True, ) elif type_name == "DDPG-OM": return get_ddpgom_agent( env, agent_id=i, hidden_layer_sizes=hidden_layer_sizes, max_replay_buffer_size=max_replay_buffer_size, ) elif type_name == "DDPG-TOM": return get_ddpgtom_agent( env, agent_id=i, hidden_layer_sizes=hidden_layer_sizes, max_replay_buffer_size=max_replay_buffer_size, ) elif type_name == "DDPG": return get_ddpg_agent( env, agent_id=i, hidden_layer_sizes=hidden_layer_sizes, max_replay_buffer_size=max_replay_buffer_size, ) elif type_name == "MADDPG": return get_maddpg_agent( env, agent_id=i, hidden_layer_sizes=hidden_layer_sizes, max_replay_buffer_size=max_replay_buffer_size, ) elif type_name == "MFAC": return get_maddpg_agent( env, agent_id=i, hidden_layer_sizes=hidden_layer_sizes, max_replay_buffer_size=max_replay_buffer_size, ) def train_fixed(seed, agent_setting, game_name="ma_softq"): set_seed(seed) suffix = f"fixed_play1/{game_name}/{agent_setting}/{seed}" set_logger(suffix) batch_size = 512 training_steps = 2000 exploration_steps = 100 max_replay_buffer_size = 1e5 hidden_layer_sizes = (128, 128) max_path_length = 1 agent_num = 2 env = DifferentialGame(game_name, agent_num) agents = [] agent_types = agent_setting.split("_") assert len(agent_types) == agent_num for i, agent_type in enumerate(agent_types): agents.append( get_agent_by_type( agent_type, i, env, hidden_layer_sizes=hidden_layer_sizes, max_replay_buffer_size=max_replay_buffer_size, ) ) sampler = MASampler( agent_num, batch_size=batch_size, max_path_length=max_path_length ) sampler.initialize(env, agents) trainer = MATrainer( env=env, agents=agents, sampler=sampler, steps=training_steps, exploration_steps=exploration_steps, training_interval=1, extra_experiences=["annealing", "recent_experiences"], batch_size=batch_size, ) trainer.run() def main(): settings = [ "ROMMEO_ROMMEO", ] game = "ma_softq" for setting in settings: processes = [] for e in range(1): seed = 1 + int(23122134 / (e + 1)) def train_func(): train_fixed(seed, setting, game) # # # Awkward hacky process runs, because Tensorflow does not like p = Process(target=train_func, args=tuple()) p.start() processes.append(p) for p in processes: p.join() if __name__ == "__main__": main()
1.992188
2
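One detail worth flagging in fixed_play.py above: train_func closes over the loop variables seed, setting and game. That happens to work here because each Process is started before the loop advances, but passing the values through args= avoids Python's late-binding closure pitfall altogether while keeping the one-process-per-trial isolation the "awkward hacky" comment hints at. A minimal sketch of that variant, with a stub train_fixed standing in for the real training entry point:

from multiprocessing import Process

def train_fixed(seed, agent_setting, game_name):
    # Stub for the real training routine.
    print(f"training seed={seed} setting={agent_setting} game={game_name}")

def main():
    settings = ["ROMMEO_ROMMEO"]
    game = "ma_softq"
    for setting in settings:
        processes = []
        for e in range(1):
            seed = 1 + int(23122134 / (e + 1))
            # Pass loop variables explicitly instead of capturing them in a
            # closure; each trial still runs in its own fresh process.
            p = Process(target=train_fixed, args=(seed, setting, game))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()

if __name__ == "__main__":
    main()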
hypha/apply/projects/templatetags/payment_request_tools.py
maxpearl/hypha
16
11543
import decimal from django import template register = template.Library() @register.simple_tag def can_change_status(payment_request, user): return payment_request.can_user_change_status(user) @register.simple_tag def can_delete(payment_request, user): return payment_request.can_user_delete(user) @register.simple_tag def can_edit(payment_request, user): return payment_request.can_user_edit(user) @register.simple_tag def percentage(value, total): if not total: return decimal.Decimal(0) unrounded_total = (value / total) * 100 # round using Decimal since we're dealing with currency rounded_total = unrounded_total.quantize( decimal.Decimal('0.0'), rounding=decimal.ROUND_DOWN, ) return rounded_total
2.390625
2
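The percentage tag in the record above quantizes with ROUND_DOWN, so a paid-so-far figure is truncated to one decimal place rather than rounded to nearest, and the "if not total" guard returns Decimal(0) instead of raising ZeroDivisionError. A quick standalone check of that rounding behaviour, using illustrative Decimal values:

import decimal

value = decimal.Decimal('33')
total = decimal.Decimal('90')

unrounded = (value / total) * 100  # 36.666...
rounded = unrounded.quantize(
    decimal.Decimal('0.0'),
    rounding=decimal.ROUND_DOWN,
)
print(rounded)  # 36.6 -- truncated down, never up, to one decimal place

In a template the tag would be invoked as {% percentage paid total %} with whatever context variables hold those two amounts.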
pylabnet/hardware/counter/swabian_instruments/qudi/slow_ctr.py
wi11dey/pylabnet
10
11544
""" pylabnet measurement and service classes for Swabian Instruments TimeTagger which implements qudi's SlowCounter interface. This file contains pylabnet wrapper and service classes to allow qudi to access Swabian Instruments TT through pylabnet network as SlowCounter. Steps: - instantiate TimeTagger - instantiate pylabnet-SlowCtrWrap (pass ref to TimeTagger as tagger) - instantiate pylabnet-SlowCtrService and assign module to the created wrapper - start pylabnet-server for SlowCtrService - in qudi, instantiate SlowCtrClient as one of the hardware modules """ from pylabnet.network.core.service_base import ServiceBase import TimeTagger as TT import time import copy import pickle class Wrap: """ Measurement instance which implements qudi's SlowCounter interface. """ def __init__(self, tagger, channel_list, clock_frequency, buffer_size): # References to the device and to TT.Counter measurement self._tagger = tagger self._counter = None # Counter parameters self._channel_list = channel_list self._clock_frequency = clock_frequency self._buffer_size = buffer_size self._bin_width = 0 self._bin_width_sec = 0 def set_up_clock(self, clock_frequency=None, clock_channel=None): """ Sets sample clock frequency for the Counter measurement. :param clock_frequency: (float) sample clock frequency. If not given, configuration value is used :param clock_channel: ignored (internal timebase is used to generate sample clock signal) :return: (int) operation status code: 0 - OK -1 - Error """ # Use config value, if no clock_frequency is specified if clock_frequency is None: clock_frequency = self._clock_frequency # Calculate final bin width bin_width = int(1e12 / clock_frequency) # in picoseconds, for device bin_width_sec = bin_width * 1e-12 # is seconds, for software timing # Set new values param to internal variables self._bin_width = bin_width self._bin_width_sec = bin_width_sec return 0 def set_up_counter(self, counter_channels=None, sources=None, clock_channel=None, counter_buffer=None): """ Configures the actual counter with a given clock. (list of int) [optional] list of channels to count clicks on. If not given, config value is used. :param counter_buffer: (int) [optional] size of the memory buffer. If not given, config value is used. :param counter_channels: ignored This argument should not be used. Counter GUI initializes set of plot curves self.curves during its on_activate() method. It basically calls counter_hardware.get_counter_channels() and uses this list to init self.curves Only after that user can click "Start" button, which will call set_up_counter(). And since GUI already has inited set of curves, set of channels must not be modified here! It will case GUI to fail. :param sources: ignored :param clock_channel: ignored :return: (int) operation status code: 0 - OK -1 - Error """ # Set counter channels if counter_channels is not None: channel_list = counter_channels else: channel_list = self._channel_list # apply counter channel change self.set_counter_channels(channel_list=channel_list) # Set buffer size if counter_buffer is not None: buffer_size = counter_buffer else: buffer_size = self._buffer_size # sanity check: if not isinstance(buffer_size, int) or buffer_size <= 0: # self.log.error('set_up_counter(): invalid parameter value counter_buffer = {}.' # 'This parameter must be a positive integer.' 
# ''.format(buffer_size)) return -1 # apply buffer size change self._buffer_size = buffer_size # Create instance of Counter measurement try: self._counter = TT.Counter( tagger=self._tagger, channels=self._channel_list, binwidth=self._bin_width, n_values=self._buffer_size ) # handle initialization error (TT functions always produce NotImplementedError) except NotImplementedError: self._counter = None # self.log.error('set_up_counter(): failed to instantiate TT.Counter measurement') return -1 # Start Counter # (TT.Counter measurement starts running immediately after instantiation, # so it is necessary to erase all counts collected since instantiation) self._counter.stop() self._counter.clear() self._counter.start() return 0 def close_clock(self): """ Closes the clock. :return: (int) error code: 0 - OK -1 - Error """ # self._bin_width = 0 # self._bin_width_sec = 0 return 0 def close_counter(self): """ Closes the counter and cleans up afterwards. :return: (int) error code: 0 - OK -1 - Error """ # Try stopping and clearing TT.Counter measurement try: self._counter.stop() self._counter.clear() # Handle the case of exception in TT function call (NotImplementedError) # and the case of self._ctr = None (AttributeError) except (NotImplementedError, AttributeError): pass # Remove reference to the counter # self._ctr = None # Clear counter parameters # self._buffer_size = [] # Do not clear channel list: # Counter GUI inits its list of curves self.curves # by calling counter_hardware.get_counter_channels() before # calling counter_hardware.set_up_counter() # If one clears _channel_list here, GUI will fail at the next # "Start" button click after reloading. # # self._channel_list = [] return 0 def get_counter(self, samples=1): """ Returns the current counts per second of the counter. :param samples: (int) [optional] number of samples to read in one go (default is one sample) :return: numpy.array((samples, uint32), dtype=np.uint32) array of count rate [counts/second] arrays of length samples for each click channel Empty array [] is returned in the case of error. """ # Sanity check: samples has valid value if samples != 1: if not isinstance(samples, int) or samples <= 0: # self.log.error('get_counter(): invalid argument samples={0}. This argument must be a positive integer' # ''.format(samples)) return [] # MORE SOPHISTICATED VERSION # (WORKS TOO SLOWLY: PROBABLY BECAUSE OF SLOW INTEGER DIVISION OF LARGE INTEGERS) # # start_time = time.time() # while time.time() - start_time < self._timeout: # new_complete_bins = self._ctr.getCaptureDuration() // self._bin_width - self._last_read_bin # # self._overflow = new_complete_bins # # self.log.error('new_complete_bins = {}'.format(new_complete_bins)) # # if new_complete_bins < samples: # time.sleep(self._bin_width_sec/2) # continue # elif new_complete_bins == samples: # self._last_read_bin += new_complete_bins # break # else: # # self.log.warn('Counter is overflowing. \n' # # 'Software pulls data in too slowly and counter bins are too short, ' # # 'such that some bins are lost. \n' # # 'Try reducing sampling rate or increasing oversampling') # self._last_read_bin += new_complete_bins # break # Wait for specified number of samples (samples parameter) to be accumulated # # This approach is very naive and is more or less accurate for # clock frequency below 50 Hz. # # For higher frequencies, the actual time sampling interval is determined # by software delays (about 1 ms). 
Counter measurement overflows # (most of the samples are over-written before software reads them in) # but does not fail. The only problem here is that time axis on the count-trace # graph is no longer accurate: # the difference between consecutive tick labels is much smaller than the actual # time interval between measured samples (about 1 ms) time.sleep(samples * self._bin_width_sec) # read-in most recent 'samples' samples try: count_array = self._counter.getData()[:, -samples:] except NotImplementedError: # self.log.error('get_counter() reading operation failed') return [] except AttributeError: # self.log.error('get_counter(): counter was not initialized') return [] # Calculate count rate [count/sec] count_rate_array = count_array / self._bin_width_sec return count_rate_array def get_counter_channels(self): """ Returns the list of click channel numbers. :return: (list of int) list of click channel numbers """ return copy.deepcopy(self._channel_list) def set_counter_channels(self, channel_list=None): """ Set click channel list. Notice that this method only modifies internal variable _channel_list. To apply the change to the counter, one has to call set_up_counter() again. :param channel_list: (list of int) list of channels to count clicks on :return: (list of int) actual list of click channels """ if channel_list is None: return self.get_counter_channels() # Sanity check: all_channels = self._get_all_channels() if not set(channel_list).issubset(set(all_channels)): # self.log.error('set_counter_channels(): requested list of channels is invalid: ' # 'some channels are not present on the device.' # 'requested list: {0} \n' # 'available channels: {1}' # ''.format(channel_list, all_channels)) return self.get_counter_channels() # Apply changes to internal variable self._channel_list self._channel_list = channel_list # Sort channel numbers, such that channel order does not depend # on order of numbers in the config file self._channel_list.sort() return self.get_counter_channels() def _get_all_channels(self): """ Return list of all channels available on the device. Positive/negative values correspond to rising/falling edge detection. For example: 1 means 'rising edge on connector 1' -1 means 'falling edge on connector 1 :return: (list of int) list of all available channel numbers, including edge sign. """ try: available_channel_tuple = list( self._tagger.getChannelList(TT.TT_CHANNEL_RISING_AND_FALLING_EDGES) ) # handle exception in the call (TT functions normally produce NotImplementedError) except NotImplementedError: # self.log.error('_get_all_channels(): communication with the device failed') return [] # handle the case of self._tagger = None except AttributeError: # self.log.error('_get_all_channels(): _tagger is None. Initialize device first') return [] return list(available_channel_tuple) class Service(ServiceBase): def exposed_set_up_clock(self, clock_frequency=None, clock_channel=None): """ Sets sample clock frequency for the Counter measurement. :param clock_frequency: (float) sample clock frequency. If not given, configuration value is used :param clock_channel: ignored (internal timebase is used to generate sample clock signal) :return: (int) operation status code: 0 - OK -1 - Error """ return self._module.set_up_clock( clock_frequency=clock_frequency, clock_channel=clock_channel ) def exposed_set_up_counter(self, counter_channels=None, sources=None, clock_channel=None, counter_buffer=None): """ Configures the actual counter with a given clock. 
(list of int) [optional] list of channels to count clicks on. If not given, config value is used. :param counter_buffer: (int) [optional] size of the memory buffer. If not given, config value is used. :param counter_channels: ignored This argument should not be used. Counter GUI initializes its set of plot curves self.curves during its on_activate() method. It basically calls counter_hardware.get_counter_channels() and uses this list to init self.curves. Only after that can the user click the "Start" button, which will call set_up_counter(). And since the GUI already has an initialized set of curves, the set of channels must not be modified here! It will cause the GUI to fail. :param sources: ignored :param clock_channel: ignored :return: (int) operation status code: 0 - OK -1 - Error """ return self._module.set_up_counter( counter_channels=counter_channels, sources=sources, clock_channel=clock_channel, counter_buffer=counter_buffer ) def exposed_close_clock(self): """ Closes the clock. :return: (int) error code: 0 - OK -1 - Error """ return self._module.close_clock() def exposed_close_counter(self): """ Closes the counter and cleans up afterwards. :return: (int) error code: 0 - OK -1 - Error """ return self._module.close_counter() def exposed_get_counter(self, samples=1): """ Returns the current counts per second of the counter. :param samples: (int) [optional] number of samples to read in one go (default is one sample) :return: numpy.array((samples, uint32), dtype=np.uint32) array of count rate [counts/second] arrays of length samples for each click channel Empty array [] is returned in the case of error. """ res = self._module.get_counter(samples=samples) return pickle.dumps(res) def exposed_get_counter_channels(self): """ Returns the list of click channel numbers. :return: (list of int) list of click channel numbers """ res = self._module.get_counter_channels() return pickle.dumps(res)
2.25
2
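The timing arithmetic in set_up_clock() and get_counter() above is easy to sanity-check in isolation: the sample clock frequency becomes a Time Tagger bin width in picoseconds, and raw per-bin counts divide back out to counts per second. A minimal sketch with an illustrative 50 Hz clock and an illustrative click count:

clock_frequency = 50.0                       # Hz, sample clock
bin_width = int(1e12 / clock_frequency)      # 20_000_000_000 ps per bin, device units
bin_width_sec = bin_width * 1e-12            # 0.02 s per bin, software timing
counts_in_bin = 1234                         # illustrative raw clicks in one bin
count_rate = counts_in_bin / bin_width_sec   # 61700.0 counts/second
print(bin_width, bin_width_sec, count_rate)

This is also why the naive time.sleep(samples * self._bin_width_sec) wait in get_counter() only tracks real time at low clock frequencies, as the comments in the record itself note.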
dev0s/classes/defaults/files.py
vandenberghinc/dev0s
1
11545
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Must still be recoded with some cleaner code. """ # imports. from dev0s.classes.config import * from dev0s.classes import utils from dev0s.classes.defaults.color import color, symbol from dev0s.classes import console from dev0s.classes.defaults.exceptions import Exceptions # pip. from datetime import datetime, timezone import shutil, math from PIL import Image as _Image_ """ Notes. All default files & formats must be exactly the same as the default dict, bool, list etc. in the native sense. There are lots of additions though. But a dict and a Dictionary should be usable universally, as if the user would not know the difference (which could otherwise become apparent very quickly in some instances). """ # the format classes. class Formats(): # variables. digits = [0,1,2,3,4,5,6,7,8,9,] str_digits = ["0","1","2","3","4","5","6","7","8","9"] alphabet, capitalized_alphabet = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"], [] for i in alphabet: capitalized_alphabet.append(i.upper()) special_characters = ["±","§","!","@","€","#","£","$","¢","%","∞","^","&","ª","(",")","–","_","+","=","{","}","[","]",";",":","'",'"',"|","\\","//","?",">",".",",","<"] # check & get format / instance. def check( nones=None, booleans=None, none_allowed_booleans=None, strings=None, none_allowed_strings=None, integers=None, none_allowed_integers=None, bytes_=None, none_allowed_bytes=None, arrays=None, none_allowed_arrays=None, dictionaries=None, none_allowed_dictionaries=None, ): if nones != None: for key,value in nones.items(): if value == None: raise ValueError(f"Invalid [{key}] format [{value}], required format is [!null].") if booleans != None: for key,value in booleans.items(): if not isinstance(value, bool): raise ValueError(f"Invalid [{key}] format [{value}], required format is [bool].") if none_allowed_booleans != None: for key,value in none_allowed_booleans.items(): if not isinstance(value, bool) and value != None: raise ValueError(f"Invalid [{key}] format [{value}], required format is [bool].") if strings != None: for key,value in strings.items(): if not isinstance(value, str): raise ValueError(f"Invalid [{key}] format [{value}], required format is [str].") if none_allowed_strings != None: for key,value in none_allowed_strings.items(): if not isinstance(value, str) and value != None: raise ValueError(f"Invalid [{key}] format [{value}], required format is [str].") if integers != None: for key,value in integers.items(): if not isinstance(value, int): raise ValueError(f"Invalid [{key}] format [{value}], required format is [int].") if none_allowed_integers != None: for key,value in none_allowed_integers.items(): if not isinstance(value, int) and value != None: raise ValueError(f"Invalid [{key}] format [{value}], required format is [int].") if bytes_ != None: for key,value in bytes_.items(): if not isinstance(value, bytes): raise ValueError(f"Invalid [{key}] format [{value}], required format is [bytes].") if none_allowed_bytes != None: for key,value in none_allowed_bytes.items(): if not isinstance(value, bytes) and value != None: raise ValueError(f"Invalid [{key}] format [{value}], required format is [bytes].") if arrays != None: for key,value in arrays.items(): if not isinstance(value, list): raise ValueError(f"Invalid [{key}] format [{value}], required format is [list].") if none_allowed_arrays != None: for key,value in none_allowed_arrays.items(): if not isinstance(value, list) and value != None: raise ValueError(f"Invalid
[{key}] format [{value}], required format is [list].") if dictionaries != None: for key,value in dictionaries.items(): if not isinstance(value, dict): raise ValueError(f"Invalid [{key}] format [{value}], required format is [dict].") if none_allowed_dictionaries != None: for key,value in none_allowed_dictionaries.items(): if not isinstance(value, dict) and value != None: raise ValueError(f"Invalid [{key}] format [{value}], required format is [dict].") def get(value, serialize=False): if value == None: return None elif isinstance(value, bool): if not serialize: return bool else: return "bool" elif isinstance(value, str): if not serialize: return str else: return "str" elif isinstance(value, int): if not serialize: return int else: return "int" elif isinstance(value, bytes): if not serialize: return bytes else: return "bytes" elif isinstance(value, list): if not serialize: return list else: return "list" elif isinstance(value, dict): if not serialize: return dict else: return "dict" elif isinstance(value, Boolean) or value.__class__.__name__ == "Boolean": if not serialize: return Boolean else: return "Boolean" elif isinstance(value, String) or value.__class__.__name__ == "String": if not serialize: return String else: return "String" elif isinstance(value, Integer) or value.__class__.__name__ == "Integer": if not serialize: return Integer else: return "Integer" elif isinstance(value, Bytes) or value.__class__.__name__ == "Bytes": if not serialize: return Bytes else: return "Bytes" elif isinstance(value, Array) or value.__class__.__name__ == "Array": if not serialize: return Array else: return "Array" elif isinstance(value, Dictionary) or value.__class__.__name__ == "Dictionary": if not serialize: return Dictionary else: return "Dictionary" elif isinstance(value, FilePath) or value.__class__.__name__ == "FilePath": if not serialize: return FilePath else: return "FilePath" elif isinstance(value, object): if not serialize: return object else: return "object" else: raise ValueError(f"Unknown format [{value}].") # # try to parse variable to format, when failed it returns None. def parse( # the variable to parse (required) (#1). variable, # the expected format (required) (#2). format=None, # with safe disabled it throws a ParseError when the variable can't be parsed to the expected format. safe=True, # the default return value for when safe is enabled. 
default=None, ): if format in [bool, "bool", Boolean, "Boolean"]: try: return bool(variable) except: if safe: return default else: raise Exceptions.ParseError(f"Unable to parse a bool from ({variable.__class__.__name__}) [{variable}].") elif format in [int, "int"]: try: return int(variable) except: if safe: return default else: raise Exceptions.ParseError(f"Unable to parse a int from ({variable.__class__.__name__}) [{variable}].") elif format in [float, "float", Integer, "Integer"]: try: return float(variable) except: if safe: return default else: raise Exceptions.ParseError(f"Unable to parse a float from ({variable.__class__.__name__}) [{variable}].") elif format in [str, "str", String, "String"]: try: return str(variable) except: if safe: return default else: raise Exceptions.ParseError(f"Unable to parse a str from ({variable.__class__.__name__}) [{variable}].") elif format in [list, "list", Array, "Array"]: if isinstance(variable, (list,Array)): return variable elif not isinstance(variable, (str, String)): if safe: return default else: raise Exceptions.ParseError(f"Unable to parse an array from ({variable.__class__.__name__}) [{variable}].") try: return ast.literal_eval(variable) except: try: return json.loads(variable) except: if safe: return default else: raise Exceptions.ParseError(f"Unable to parse an array from ({variable.__class__.__name__}) [{variable}].") elif format in [dict, "dict", Dictionary, "Dictionary"]: if isinstance(variable, (dict,Dictionary)): return variable elif not isinstance(variable, (str, String)): raise Exceptions.ParseError(f"Unable to parse a dict from ({variable.__class__.__name__}) [{variable}].") try: return ast.literal_eval(variable) except: try: return json.loads(variable) except: if safe: return default else: raise Exceptions.ParseError(f"Unable to parse a dict from ({variable.__class__.__name__}) [{variable}].") else: raise Exceptions.InvalidUsage(f"Specified format [{format}] is not a valid format option.") # # initialize from default format to dev0s format. def initialize(variable, file_paths=True): if variable.__class__.__name__ in ["str","String"]: if file_paths and "/" in variable and Files.exists(variable): return FilePath(variable) else: return String(variable) elif variable.__class__.__name__ in ["bool","Boolean"]: return Boolean(variable) elif variable.__class__.__name__ in ["int","float","Integer"]: return Integer(variable) elif variable.__class__.__name__ in ["dict","Dictionary"]: return Dictionary(variable) elif variable.__class__.__name__ in ["list","Array"]: return Array(variable) else: return variable # # denitialize from dev0s formats to default format. def denitialize(variable, file_paths=True): if variable.__class__.__name__ in ["String"]: return str(variable) elif variable.__class__.__name__ in ["FilePath"]: return str(variable) elif variable.__class__.__name__ in ["Boolean"]: return bool(variable) elif variable.__class__.__name__ in ["Integer"]: return variable.value elif variable.__class__.__name__ in ["Dictionary", "ResponseObject", "OutputObject", "dict"]: new = {} for key,value in variable.items(): new[key] = Formats.denitialize(value, file_paths=file_paths) return new elif variable.__class__.__name__ in ["Array", "list"]: new = [] for value in variable: new.append(Formats.denitialize(value, file_paths=file_paths)) return new else: return variable # # the file path object class. class FilePath(object): def __init__(self, path, default=False, check=False, load=False): # docs. 
DOCS = { "module":"FilePath", "initialized":False, "description":[], "chapter": "Defaults", } # init. self.path = str(self.clean(path=str(path), raw=True)) if check == False and default == False and path != False: if Files.directory(self.path) and self.path[len(self.path)-1] != '/': self.path += '/' if check and os.path.exists(self.path) == False: raise FileNotFoundError(f"Path [{self.path}] does not exist.") self.ownership = self.Ownership(path=self.path, load=load) self.permission = self.Permission(path=self.path, load=load) # # - info: def join(self, name=None, type="/"): if type not in ["", "/"] and "." not in type: type = "." + type path = self.path if path[len(path)-1] != "/": path += '/' return FilePath("{}{}{}".format(path, name, type)) def name(self, path=None, remove_extension=False,): if path == None: path = self.path if path in [False, None]: return None x = 1 if path[len(path)-1] == '/': x += 1 name = path.split('/')[len(path.split('/'))-x] if remove_extension: count = len(name.split(".")) if count > 1: c, s = 0, None for i in name.split("."): if c < count-1: if s == None: s = i else: s += "."+i c += 1 name = s return name def extension(self, name=None, path=None): if path == None: path = self.path # - check directory: extension = None if name == None and Files.directory(path): extension = 'dir' else: # - get extension: try: if name == None: name = self.name(path=path) extension = name.split('.')[len(name.split('.'))-1] except: try: name = self.name(path=path) extension = name.split('.')[len(name.split('.'))-1] except: extension = None # - check image & video: if extension in ["jpg", "png", "gif", "webp", "tiff", "psd", "raw", "bmp", "heig", "indd", "jpeg", "svg", "ai", "eps", "pdf"]: extension = "img" elif extension in ["mp4", "m4a", "m4v", "f4v", "f4a", "m4b", "m4r", "f4b", "mov", "3gp", "3gp2", "3g2", "3gpp", "3gpp2", "h.263", "h.264", "hevc", "mpeg4", "theora", "3gp", "windows media 8", "quicktime", "mpeg-4", "vp8", "vp6", "mpeg1", "mpeg2", "mpeg-ts", "mpeg", "dnxhd", "xdcam", "dv", "dvcpro", "dvcprohd", "imx", "xdcam", "hd", "hd422"]: extension = "video" return extension def base(self, # the path (leave None to use self.path) (param #1). path=None, # the dirs back. 
back=1, ): if path == None: path = self.path return Files.base(path=path, back=back) # def basename(self, back=1, path=None): if path == None: path = self.path return self.name(path=self.base(back=back, path=path)) def size(self, format=str, mode="auto", path=None, options=["auto", "bytes", "kb", "mb", "gb", "tb"]): def __size__(path): total = 0 try: # print("[+] Getting the size of", directory) for entry in os.scandir(path): if entry.is_file(): # if it's a file, use stat() function total += entry.stat().st_size elif entry.is_dir(): # if it's a directory, recursively call this function total += __size__(entry.path) except NotADirectoryError: # if `directory` isn't a directory, get the file size then return os.path.getsize(path) except PermissionError: # if for whatever reason we can't open the folder, return 0 return 0 return total # if path == None: path = self.path if path != None: path = str(path) return self.convert_bytes(__size__(path), format=format, mode=mode) def space(self, format=str, mode="auto", path=None, options=["auto", "bytes", "kb", "mb", "gb", "tb"]): if path == None: path = self.path total, used, free = shutil.disk_usage(path) total, used, free = self.convert_bytes(total, format=format, mode=mode), self.convert_bytes(used, format=format, mode=mode), self.convert_bytes(free, format=format, mode=mode) return { "total":total, "used":used, "free":free, } def convert_bytes(self, bytes:int, format=str, mode="auto", options=["auto", "bytes", "kb", "mb", "gb", "tb"]): if format in [float, "float", "integer", "Integer", Integer]: format = float if (mode == "bytes" or mode == "bytes".upper()): return float(bytes) elif format in [int, "int", "integer", "Integer", Integer]: format = int if (mode == "bytes" or mode == "bytes".upper()): return int(round(bytes,0)) if mode == "auto": if int(bytes/1024**4) >= 10: bytes = round(bytes/1024**4,2) if format not in [int, float]: bytes = '{:,} TB'.format(bytes)#.replace(',', '.') elif int(bytes/1024**3) >= 10: bytes = round(bytes/1024**3,2) if format not in [int, float]: bytes = '{:,} GB'.format(bytes)#.replace(',', '.') elif int(bytes/1024**2) >= 10: bytes = round(bytes/1024**2,2) if format not in [int, float]: bytes = '{:,} MB'.format(bytes)#.replace(',', '.') elif int(bytes/1024) >= 10: bytes = round(bytes/1024,2) if format not in [int, float]: bytes = '{:,} KB'.format(bytes)#.replace(',', '.') else: bytes = int(round(bytes,0)) if format not in [int, float]: bytes = '{:,} Bytes'.format(bytes)#.replace(',', '.') elif (mode == "bytes" or mode == "bytes".upper()): bytes = int(round(bytes,0)) if format not in [int, float]: bytes = '{:,} Bytes'.format(bytes)#.replace(',', '.') elif mode == "kb" or mode == "kb".upper(): bytes = round(bytes/1024,2) if format not in [int, float]: bytes = '{:,} KB'.format(bytes)#.replace(',', '.') elif mode == "mb" or mode == "mb".upper(): bytes = round(bytes/1024**2,2) if format not in [int, float]: bytes = '{:,} MB'.format(bytes)#.replace(',', '.') elif mode == "gb" or mode == "gb".upper(): bytes = round(bytes/1024**3,2) if format not in [int, float]: bytes = '{:,} GB'.format(bytes)#.replace(',', '.') elif mode == "tb" or mode == "tb".upper(): bytes = round(bytes/1024**4,2) if format not in [int, float]: bytes = '{:,} TB'.format(bytes)#.replace(',', '.') else: raise Exceptions.InvalidUsage(f"Selected an invalid size format [{format}], options {options}.") return bytes def exists(self, # the path (leave None to use self.path) (#1). path=None, # root permission required. 
sudo=False, ): if path == None: path = self.path path = gfp.clean(path=path, remove_double_slash=True, remove_last_slash=True) path = str(path) if not sudo: return os.path.exists(str(path)) else: try: output = utils.__execute__(["sudo", "ls","-ld",path]) if "No such file or directory" in str(output): return False else: return True except: return False # def mount(self, # the path (leave None to use self.path) (#1). path=None, ): if path == None: path = self.path path = gfp.clean(path=path, remove_double_slash=True, remove_last_slash=True) return os.path.ismount(path) # def directory(self, # the path (leave None to use self.path) (#1). path=None, ): if path == None: path = self.path return Files.directory(path) # def mtime(self, format='%d-%m-%y %H:%M.%S', path=None): if path == None: path = self.path fname = pathlib.Path(path) try: mtime = fname.stat().st_mtime except: mtime = fname.stat().ct_mtime if format in ['s', "seconds"]: return mtime else: return Formats.Date().from_seconds(mtime, format=format) def clean(self, # the path (leave None to use self.path) (param #1). path=None, # the clean options. remove_double_slash=True, remove_first_slash=False, remove_last_slash=False, ensure_first_slash=False, ensure_last_slash=False, # return the path as a raw string. raw=False, ): if path == None: path = self.path if not isinstance(path, (str, String)): return path path = str(path).replace("~",HOME) while True: if remove_double_slash and "//" in path: path = path.replace("//","/") elif remove_first_slash and len(path) > 0 and path[0] == "/": path = path[1:] elif remove_last_slash and len(path) > 0 and path[len(path)-1] == "/": path = path[:-1] elif ensure_first_slash and len(path) > 0 and path[0] != "/": path = "/"+path elif ensure_last_slash and len(path) > 0 and path[len(path)-1] != "/": path += "/" else: break if raw: return path else: return FilePath(path) def absolute(self, # the path (leave None to use self.path) (param #1). path=None, ): if path == None: path = self.path return FilePath(os.path.abspath(path)) # path to python module path. def module(self, path=None): if path == None: path = self.path return gfp.clean(path=path, remove_double_slash=True, remove_last_slash=True, remove_first_slash=True).replace("/",".").replace(".py","").replace(".__init__", "").replace("__init__", "") # serialize a requirements file. def requirements(self, path=None, format="pip", include_version=True): if format in ["pip3"]: format = "pip" if format not in ["pip"]: raise ValueError(f"Invalid usage, format [{format}] is not a valid option, options: [pip].") # pip requirements. if format == "pip": requirements = [] for i in Files.load(path).split("\n"): if len(i) > 0 and i[0] != "#" and i not in [""," "]: while True: if len(i) > 0 and i[len(i)-1] in [" "]: i = i[:-1] else: break if " " not in i: sid = None for lid in ["==", ">=", "<="]: if lid in i: sid = lid ; break if sid != None: if include_version: requirements.append(i) else: requirements.append(i.split(sid)[0]) else: requirements.append(i) return requirements # - commands: def delete(self, # the path (leave None to use self.path) (param #1). path=None, # the options. forced=False, sudo=False, silent=False, ): if path == None: path = self.path if silent: silent = ' 2> /dev/null' else: silent = "" if sudo: sudo = "sudo " else: sudo = "" options = " " if forced: options = " -f " if Files.directory(path): options = " -fr " elif Files.directory(path): options = " -r " os.system(f"{sudo}rm{options}{path}{silent}") def move(self, # the to path (#1). 
path=None, # root permission required. sudo=False, # root permission required. log_level=0, ): return Files.move( # the from & to path (#1 & #2). self.path, path, # root permission required. sudo=sudo, # root permission required. log_level=log_level, ) self.path = gfp.clean(path=path) def copy(self, # the to path (#1). path=None, # root permission required. sudo=False, # the active log level. log_level=0, # the exclude patterns. exclude=[], # update deleted files. delete=True, ): return Files.copy( # the from & to path (#1 & #2). self.path, path, # root permission required. sudo=sudo, # the active log level. log_level=log_level, # the exclude patterns. exclude=exclude, # update deleted files. delete=delete,) def open(self, sudo=False): if sudo: sudo = "sudo " else: sudo = "" if OS in ["macos"]: os.system(f"{sudo}open {self.path}") elif OS in ["linux"]: os.system(f"{sudo}nautulis {self.path}") else: raise Exceptions.InvalidOperatingSystem(f"Unsupported operating system [{OS}].") def create(self, # Option 1: (creating a directory) # - boolean format: directory=False, # Option 2: (creating any file extension) # - string format: data="", # Options: # - integer format: permission=None, # - string format: owner=None, group=None, # - boolean format: sudo=False, ): # - option 1: if directory: if sudo: os.system('sudo mkdir -p '+self.path) else: os.system('mkdir -p '+self.path) # - option 2: elif data != None: if sudo: f = Files.File(path='/tmp/tmp_file', data=data) f.save() os.system(f"sudo mv {f.file_path.path} {self.path}") else: Files.File(path=self.path, data=data).save() #with open # - invalid option: else: raise ValueError("Invalid option, either enable the [directory] boolean to create a directory, or specify [path] and [data] to create any file sort.") # - default: if owner != None or group != None: self.ownership.set(owner=owner, group=group, sudo=sudo) if permission != None: self.permission.set(permission, sudo=sudo) # def check(self, # Option 1: (creating a directory) # - boolean format: directory=False, # Option 2: (creating any file extension) # - string format: data="", # Options: # - integer format: permission=None, # - string format: owner=None, group=None, # - boolean format: sudo=False, silent=False, recursive=False, # for directories only (for permission & ownership check) ): # - option 1: if not self.exists(sudo=sudo): self.create(directory=directory, data=data, permission=permission, owner=owner, group=group, sudo=sudo) else: # - default: self.ownership.check(owner=owner, group=group, sudo=sudo, silent=silent, recursive=recursive) self.permission.check(permission=permission, sudo=sudo, silent=silent, recursive=recursive) # # support default str functions. def split(self, path): return Files.Array(self.path.split(str(path))) def count(self, path): return Formats.Integer(self.path.count(str(path))) def replace(self, from_, to_): return self.path.replace(str(from_), str(to_)) def lower(self, path): return self.path.lower(str(path)) def upper(self, path): return self.path.upper(str(path)) # support subscriptionable. def __getitem__(self, index): return self.path[Formats.denitialize(index)] def __setitem__(self, index, value): self.path[Formats.denitialize(index)] = str(value) # support "+" & "-" . 
def __add__(self, path): if isinstance(path, str): a=1 elif isinstance(path, self.__class__): path = path.path elif not isinstance(path, self.__class__): raise Exceptions.FormatError(f"Can not add object {self.__class__} & {path.__class__}.") return self.path + path def __sub__(self, path): if isinstance(path, str): a=1 elif isinstance(path, self.__class__): path = path.path elif not isinstance(path, self.__class__): raise Exceptions.FormatError(f"Can not add object {self.__class__} & {path.__class__}.") return self.path.replace(path, "") # support +. def __concat__(self, path): if isinstance(path, str): a=1 elif isinstance(path, self.__class__): path = path.path elif not isinstance(path, self.__class__): raise Exceptions.FormatError(f"Can not add object {self.__class__} & {path.__class__}.") return self.path + path # support default iteration. def __iter__(self): return iter(self.path) # support '>=' & '>' operator. def __gt__(self, path): if not isinstance(path, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {path.__class__}.") return len(self.path) > len(path.path) def __ge__(self, path): if not isinstance(path, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {path.__class__}.") return len(self.path) >= len(path.path) # support '<=' & '<' operator. def __lt__(self, path): if not isinstance(path, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {path.__class__}.") return len(self.path) < len(path.path) def __le__(self, path): if not isinstance(path, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {path.__class__}.") return len(self.path) <= len(path.path) # support '==' & '!=' operator. def __eq__(self, path): if isinstance(path, str): return self.path == path elif not isinstance(path, self.__class__): return False return self.path == path.path def __ne__(self, path): if isinstance(path, str): return self.path != path elif not isinstance(path, self.__class__): return True return self.path != path.path # support 'in' operator. def __contains__(self, path): if isinstance(path, (list, Files.Array)): for i in path: if i in self.path: return True return False else: return path in self.path # # int representation. def __repr__(self): return str(self) # str representation. def __str__(self): return str(self.path) # int representation. def __int__(self): return int(self.path) # float representation. def __float__(self): return float(self.path) # bool representation. def __bool__(self): if self.path in [1.0, 1, "true", "True", "TRUE", True]: return True elif self.path in [0, 0.0, "false", "False", "FALSE", False]: return False else: raise Exceptions.FormatError(f"Could not parse a bool from {self.__id__()}.") # content count. def __len__(self): return len(self.path) # object id. def __id__(self): return f"({self.instance()}:{str(self)})" # object instance. def instance(self): return "FilePath" # @property def __name__(self): return self.instance() # support self assignment. def assign(self, path, load=False): if isinstance(path, self.__class__): path = path.path self.path = gfp.clean(path=path) self.ownership = self.Ownership(path=self.path, load=load) self.permission = self.Permission(path=self.path, load=load) return self # return raw data. def raw(self): return self.path # - objects: class Ownership(object): def __init__(self, path=None, load=False): # docs. 
DOCS = { "module":"FilePath.Ownership", "initialized":False, "description":[], "chapter": "Defaults", } # init. self.path = path self.owner = None self.group = None if load: get = self.get() self.owner = get["owner"] self.group = get["permission"] # - info: def get(self, path=None): if path == None: path = self.path owner = pwd.getpwuid(os.stat(path).st_uid).pw_name try: group = grp.getgrgid(os.stat(path).st_gid).gr_name except KeyError: # unknown group likely from different os / machine. group = os.stat(path).st_gid except Exception as e: raise ValueError(f"Unable to retrieve the group of file {path}, error: {e}.") return owner, group def set(self, # the permission (str) (#1). owner=None, # the group (str) (optional) (#2). group=None, # the path (optional) (overwrites self.path) (#3). path=None, # root permission required. sudo=False, # recursive. recursive=False, # silent. silent=False, ): if path == None: path = self.path if group == None: if OS in ["macos"]: group = "wheel" elif OS in ["linux"]: group = "root" else: raise ValueError("Unsupported operating system [{}].".format(OS)) silent_option = "" if silent: silent_option = ' 2> /dev/null' if recursive: if sudo: os.system("sudo chown -R {} {} {}".format(owner+":"+group, path, silent_option)) else: os.system("chown -R {} {}".format(owner+":"+group, path)) else: if sudo: os.system("sudo chown {} {} {}".format(owner+":"+group, path, silent_option)) else: os.system("chown {} {} {}".format(owner+":"+group, path, silent_option)) def check(self, owner=None, group=None, sudo=False, silent=False, iterate=False, recursive=False, path=None): # combine [recursive] and [iterate] to walk all set all files in an directory and check it with the given permission. if path == None: path = self.path if group == None: if OS in ["macos"]: group = "wheel" elif OS in ["linux"]: group = "root" else: raise ValueError("Unsupported operating system [{}].".format(OS)) _owner_, _group_ = self.get(path=path) if _owner_ != owner or _group_ != group: self.set(owner=owner, group=group, sudo=sudo, silent=silent, recursive=recursive, path=path) if recursive and iterate and Files.directory(self.path): for dirpath, subdirs, files in os.walk(self.path): for path in subdirs: #print("DIRECTORY:",path) #print("> FULL PATH NAME:",dirpath+"/"+path) if path not in ["lost+found"]: file_path = Formats.FilePath(dirpath+"/"+path) file_path.ownership.check(owner=owner, group=group, sudo=sudo, silent=silent) for path in files: #print("FILE NAME:",path) #print("> FULL PATH:",dirpath+"/"+path) file_path = Formats.FilePath(dirpath+"/"+path) file_path.ownership.check(owner=owner, group=group, sudo=sudo, silent=silent) class Permission(object): def __init__(self, path=None, load=False): # docs. DOCS = { "module":"FilePath.Permission", "initialized":False, "description":[], "chapter": "Defaults", } # defaults. #self.__class__.__name__ = "Permission" # init. self.path = path self.permission = None if load: self.permission = self.get() # - info: def get(self, path=None): if path == None: path = self.path status = os.stat(path) permission = oct(status.st_mode)[-3:] return permission def set(self, # the permission (int) (#1). permission=None, # the path (optional) (overwrites self.path) (#2). path=None, # root permission required. sudo=False, # recursive. recursive=False, # silent. 
silent=False, ): if path == None: path = self.path silent_option = "" if silent: silent_option = ' 2> /dev/null' if recursive: if sudo: os.system("sudo chmod -R {} {} {}".format(permission, path, silent_option)) else: os.system("chmod -R {} {} {}".format(permission, path, silent_option)) else: if sudo: os.system("sudo chmod {} {} {}".format(permission, path, silent_option)) else: os.system("chmod {} {} {}".format(permission, path, silent_option)) def check(self, permission=None, sudo=False, silent=False, iterate=False, recursive=False, path=None): # combine [recursive] and [iterate] to walk all set all files in an directory and check it with the given permission. if path == None: path = self.path if self.get(path=path) != permission: self.set(permission=permission, sudo=sudo, silent=silent, recursive=recursive, path=path) if recursive and iterate and Files.directory(path): for dirpath, subdirs, files in os.walk(path): for path in subdirs: #print("DIR NAME:",path) #print("> FULL PATH:",dirpath+"/"+path) if path not in ["lost+found"]: file_path = Formats.FilePath(dirpath+"/"+path) file_path.permission.check(permission=permission, sudo=sudo, silent=silent) for path in files: #print("FILE NAME:",path) #print("> FULL PATH:",dirpath+"/"+path) file_path = Formats.FilePath(dirpath+"/"+path) file_path.permission.check(permission=permission, sudo=sudo, silent=silent) # # the string object class. class String(object): def __init__(self, # the string's value (str) (#1). string="", # the path (str, FilePath) (param #2). path=False, # load the data on initialization. load=False, # the default array (will be created if file path does not exist). default=None, ): # docs. DOCS = { "module":"String", "initialized":False, "description":[], "chapter": "Defaults", } # init. self.string = str(string) # path. 
if path == False: self.file_path = self.fp = None # used in local memory (not fysical) else: self.file_path = self.fp = Formats.FilePath(path) if default != None and not Files.exists(self.file_path.path): self.save(array=default) if load: self.load() # def save(self, string=None, path=None, sudo=False): if string == None: string = self.string if path == None: path = self.file_path.path utils.__check_memory_only__(path) self.string = str(string) return Files.save(path, str(string), format="str", sudo=sudo) def load(self, default=None, sudo=False): utils.__check_memory_only__(self.file_path.path) if not os.path.exists(self.file_path.path) and default != None: self.save(default, sudo=sudo) self.string = Files.load(self.file_path.path, format="str", sudo=sudo) return self.string def is_numerical(self): for i in ["q", "w", "e", "r", "t", "y", "u", "i", "o", "p", "a", "s", "d", "f", "g", "h", "j", "k", "l", "z", "x", "c", "v", "b", "n", "m"]: if i in self.string.lower(): return False return True def bash(self): a = self.string.replace('(','\(').replace(')','\)').replace("'","\'").replace(" ","\ ").replace("$","\$").replace("!","\!").replace("?","\?").replace("@","\@").replace("$","\$").replace("%","\%").replace("^","\^").replace("&","\&").replace("*","\*").replace("'","\'").replace('"','\"') return a def identifier(self): x = self.string.lower().replace(' ','-') return x def variable_format(self, exceptions={ "smart_card":"smartcard", "smart_cards":"smartcards" , "web_server":"webserver" , }, ): s, c = "", 0 for i in self.string: try: n = self.string[c+1] except: n = "none" try: p = self.string[c-1] except: p = "none" if s != "" and i.lower() != i and str(n).lower() == str(n) and str(p).lower() == str(p): s += "_" s += i.lower() c += 1 if s in list(exceptions.keys()): return exceptions[s] else: return s def class_format(self): s, next_capital = "", False for i in self.string: if i == "_": next_capital = True elif next_capital: s += i.upper() else: s += i return s def capitalized_scentence(self): x = self.string.split(" ") cap = [y.capitalize() for y in x] return " ".join(cap) def capitalized_word(self): try: new = self.string[0].upper() c = 0 for i in self.string: if c > 0: new += i c += 1 return new except IndexError: return self.string def generate(self, # the length of the generated string. length=6, # include digits. digits=False, # include capital letters. capitalize=False, # include special characters. special=False, ): charset = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"] if capitalize: for i in ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]: charset.append(i.upper()) if digits: digits = ["1","2","3","4","5","6","7","8","9","0"] else: digits = [] if special: special = ["!", "?", "&", "#","@", "*"] else: special = [] s = "" for i in range(length): if len(digits) > 0 and random.randrange(1,101) <= 40: s += digits[random.randrange(0, len(digits))] elif len(special) > 0 and random.randrange(1,101) <= 10: s += special[random.randrange(0, len(special))] else: s += charset[random.randrange(0, len(charset))] return s # # iterate a string (backwards) to check the first occurency of a specified charset. 
def first_occurence(self, charset=[" ", "\n"], reversed=False, string=None): if string == None: string = self.string if reversed: c, space_newline_id = len(string)-1, "" for _ in string: char = string[c] if char in charset: a = 0 for i in charset: if i == char: return i c -= 1 return None else: c, space_newline_id = 0, "" for _ in string: char = string[c] if char in charset: a = 0 for i in charset: if i == char: return i c += 1 return None # splice a string into before/after by a first occurence. # if include is True and both include_before and inluce_after are False it includes at before. def before_after_first_occurence(self, slicer=" ", include=True, include_before=False, include_after=False, string=None): if isinstance(slicer, list): first = self.first_occurence(charset=slicer, string=string) return self.before_after_first_occurence(slicer=first, include=include, include_before=include_before, include_after=include_after, string=string) else: if string == None: string = self.string before, after, slice_count, slices, _last_ = "", "", string.count(slicer), 0, "" for char in string: if len(_last_) >= len(slicer): _last_ = _last_[1:] _last_ += char if _last_ == slicer: slices += 1 if include: if slices != slice_count or include_before: before += char elif include_after: after += char else: before += char elif slices > 0: after += char else: before += char return before, after # splice a string into before/selected/after by a first occurence. def before_selected_after_first_occurence(self, slicer=" ", string=None): if string == None: string = self.string before, selected, after, slice_count, open, _last_ = "", "", "", string.count(slicer), False, "" selected_sliced_count = 0 for char in string: if isinstance(slicer, str) and len(_last_) >= len(slicer): _last_ = _last_[1:] elif isinstance(slicer, list) and len(_last_) >= len(slicer[selected_sliced_count]): _last_ = _last_[1:] _last_ += char if (isinstance(slicer, str) and _last_ == slicer) or (isinstance(slicer, list) and _last_ == slicer[selected_sliced_count]): selected_sliced_count += 1 selected += char if open: open = False else: open = True elif open: after += char else: before += char return before, selected, after # splice a string into before/after by a last occurence. # if include is True and both include_before and inluce_after are False it includes at before. def before_after_last_occurence(self, slicer=" ", include=True, include_before=False, include_after=False, string=None): if string == None: string = self.string before, after, slice_count, slices, _last_ = "", "", string.count(slicer), 0, "" for char in string: if len(_last_) >= len(slicer): _last_ = _last_[1:] _last_ += char if _last_ == slicer: slices += 1 if include: if slices != slice_count or include_before: before += char elif include_after: after += char else: before += char elif slices == slice_count: after += char else: before += char return before, after # splice a string into before/selected/after by a last occurence. def before_selected_after_last_occurence(self, slicer=" ", string=None): if string == None: string = self.string before, selected, after, slice_count, slices, _last_ = "", "", "", string.count(slicer), 0, "" for char in string: if len(_last_) >= len(slicer): _last_ = _last_[1:] _last_ += char if _last_ == slicer: slices += 1 selected += char elif slices == slice_count: after += char else: before += char return before, selected, after # get the first text between an 2 string identifiers [start,end] by depth. # identifiers must be parameter number 1. 
def between(self, identifiers=["{","}"], depth=1, include=True, string=None): # vars. if string == None: string = self.string keep_last = [len(identifiers[0]), len(identifiers[1])] last = ["", ""] unadded = "" s, open, opened, first_open = "", 0, False, False # iterate. for i in string: # set last & unadded. unadded += i last[0] += i last[1] += i if len(last[0]) > keep_last[0]: last[0] = str(String(last[0]).remove_first(1)) if len(last[1]) > keep_last[1]: last[1] = str(String(last[1]).remove_first(1)) # check ids. if last[0] == identifiers[0]: open += 1 first_open = True elif last[1] == identifiers[1]: open -= 1 if open >= depth: if include or open == depth: if include and first_open: s += identifiers[0] unadded = "" first_open = False else: s += unadded unadded = "" opened = True if opened and open < depth: if include: s += unadded unadded = "" break # remainders. if unadded != "" and opened and open < depth: if include: s += unadded unadded = "" # handler. return Formats.String(s) # # get the text with betwee & replace the inside between str with a new str. def replace_between(self, # the between identifiers (list) (#1). identifiers=["{","}"], # the new string (str) (#2). to="", # the identifiers depth. depth=1, # the optional string. string=None, ): update = False if string == None: update = True string = self.string sliced = self.between(identifiers, depth=depth, include=True, string=string) string = string.replace(str(sliced), to) if update: self.string = string return string # # increase version. def increase_version(self): # version 2. # path = "/tmp/increase_version" Files.save(path, f"""version='{self.string}"""+"""' && echo $version | awk -F. -v OFS=. 'NF==1{print ++$NF}; NF>1{if(length($NF+1)>length($NF))$(NF-1)++; $NF=sprintf("%0*d", length($NF), ($NF+1)%(10^length($NF))); print}'""") return subprocess.check_output([f"bash", path]).decode().replace("\n","") # version 1. # old_version = self.string base, _base_= [], old_version.split(".") increase = True for i in _base_: base.append(int(i)) count = len(base)-1 for i in range(len(base)): if increase: if base[count] >= 9: if count > 0: base[count-1] += 1 base[count] = 0 increase = False else: base[count] += 1 break else: base[count] += 1 break else: if count > 0 and int(base[count]) >= 10: base[count-1] += 1 base[count] = 0 increase = False elif count == 0: break count -= 1 version = "" for i in base: if version == "": version = str(i) else: version += "."+str(i) return version # slice dict from string. # get the first {} from the string by depth. def slice_dict(self, depth=1): return self.between(["{", "}"], depth=depth) # slice array from string. # get the first [] from the string by depth. def slice_array(self, depth=1): return self.between(["[", "]"], depth=depth) # slice tuple from string. # get the first () from the string by depth. def slice_tuple(self, depth=1): return self.between(["(", ")"], depth=depth) # iterate chars. # > for charcount, char in String.iterate_chars() def iterate_chars(self): charcount, items = 0, [] for char in self.string: items.append([charcount, char]) charcount += 1 return items def iterate_characters(self): return self.iterate_chars() # iterate lines. # > for linecount, line in String.iterate_lines() def iterate_lines(self): linecount, items = 0, [] for line in self.string.split("\n"): items.append([linecount, line]) linecount += 1 return items # slice indent from string. 
# get the content bewteen the \n{indent} def indent(self, indent=4): s = "" for i in range(indent): s += " " return s def line_indent(self, line=""): # get line indent. line = line.replace(" ", " ") if len(line) > 0 and " " in line: line_indent = 0 for c in line: if c in [" "]: line_indent += 1 else: break else: line_indent = 0 return Formats.Integer(line_indent) def slice_indent(self, indent=4, depth=1, string=None, remove_indent=True): if string == None: string = self.string string = string.replace(" ", " ") s, open, opened, d = "", 0, False, 0 for line in string.split("\n"): # get line indent. if len(line) > 0 and " " in line: line_indent = 0 for c in line: if c in [" "]: line_indent += 1 else: break else: line_indent = 0 # check indent match. if (not opened and line_indent == indent) or (opened and line_indent >= indent): if d >= depth: if remove_indent: s += line[indent:]+"\n" else: s += line+"\n" opened = True #elif len(line) > 0 and not opened and line_indent == indent: # d += 1 elif len(line) > 0 and line_indent <= indent: if opened: break else: d += 1 return s # get the first / last n characters of the string. def first(self, count): if isinstance(count, (int, float, Integer)): count = int(count) else: count = len(count) return self.string[:count] def last(self, count): if isinstance(count, (int, float, Integer)): count = int(count) else: count = len(count) if len(self.string) >= count: return self.string[count:] else: return None # # remove first / last n characters of the string. def remove_first(self, count): if isinstance(count, (int, float, Integer)): count = int(count) else: count = len(count) removed = self.first(count) self.string = self.string[count:] return self.string def remove_last(self, count): if isinstance(count, (int, float, Integer)): count = int(count) else: count = len(count) removed = self.last(count) self.string = self.string[:-count] return self.string # # support default str functions. def split(self, string): if isinstance(string, (list, Array)): if isinstance(string, Array): array = string.array else: array = string new, last, next_start = [], "", None for i in self.string: last += i newslice = False #l_next_start = None for test in array: if test in last: if str(last[-len(test):]) == str(test): #l_next_start = last[:-len(test)] last = last[:-len(test)] newslice = True break if newslice: new.append(last) last = "" #if next_start == None: new.append(last) #elif include: # new.append(next_start+last) # next_start = None #if include and l_next_start != None: # next_start = l_next_start if last != "": new.append(last) return new else: return Files.Array(self.string.split(str(string))) def count(self, string): return Formats.Integer(self.string.count(str(string))) def replace(self, from_, to_): return self.string.replace(str(from_), str(to_)) def lower(self, string): return self.string.lower(str(string)) def upper(self, string): return self.string.upper(str(string)) # support "+" & "-" . 
    def __add__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            raise Exceptions.FormatError(f"Can not add object {self.__class__} & {string.__class__}.")
        return self.string + string
    def __iadd__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            raise Exceptions.FormatError(f"Can not add object {self.__class__} & {string.__class__}.")
        self.string = self.string + string
        return self
    def __sub__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            raise Exceptions.FormatError(f"Can not sub object {self.__class__} & {string.__class__}.")
        return self.string.replace(string, "")
    def __isub__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            raise Exceptions.FormatError(f"Can not sub object {self.__class__} & {string.__class__}.")
        self.string = self.string.replace(string, "")
        return self
    # support subscription.
    def __getitem__(self, index):
        return self.string[Formats.denitialize(index)]
    def __setitem__(self, index, value):
        # bug fix: str is immutable, so rebuild the string around the index.
        index = Formats.denitialize(index)
        self.string = self.string[:index] + str(value) + self.string[index + 1:]
    # support default iteration.
    def __iter__(self):
        return iter(self.string)
    # support '>=' & '>' operator.
    def __gt__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
        return len(self.string) > len(string)
    def __ge__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
        return len(self.string) >= len(string)
    # support '<=' & '<' operator.
    def __lt__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
        return len(self.string) < len(string)
    def __le__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.")
        return len(self.string) <= len(string)
    # support '==' & '!=' operator.
    def __eq__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            return False
        return self.string == string
    def __ne__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            return True
        return self.string != string
    # support +.
    def __concat__(self, string):
        if isinstance(string, self.__class__):
            string = string.string
        elif not isinstance(string, str):
            # bug fix: the original tested the undefined name [value] here.
            raise Exceptions.FormatError(f"Can not concat object {self.__class__} & {string.__class__}.")
        return self.string + string
    # support 'in' operator.
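# ----------------------------------------------------------------------
# Added note (not part of the original source): the ordering operators
# above compare by *length*, not lexicographically, so e.g. "zz" sorts
# below "aaa". A tiny standalone illustration of that design choice;
# `LenOrdered` is a hypothetical stand-in, not the library class.
class LenOrdered:
    def __init__(self, s): self.s = s
    def __lt__(self, other): return len(self.s) < len(str(other))

assert LenOrdered("zz") < "aaa"   # shorter counts as smaller here,
assert not ("zz" < "aaa")         # unlike plain str comparison.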
def __contains__(self, string): if isinstance(string, (list, Files.Array)): for i in string: if str(i) in str(self.string): return True return False else: return str(string) in str(self.string) # # representation. def __repr__(self): return str(self) # str representation. def __str__(self): return str(self.string) # int representation. def __int__(self): return int(self.string) # float representation. def __float__(self): return float(self.string) # bool representation. def __bool__(self): return len(self.string) > 0 #if self.string in [1.0, 1, "true", "True", "TRUE", True]: # return True #elif self.string in [0, 0.0, "false", "False", "FALSE", False]: # return False #else: # raise Exceptions.FormatError(f"Could not parse a bool from {self.__id__()}.") # content count. def __len__(self): return len(self.string) # object id. def __id__(self): return f"({self.instance()}:{str(self)})" # # object instance. def instance(self): return "String" # @property def __name__(self): return self.instance() # support self assignment. def assign(self, string): if isinstance(string, (int, float)): a=1 elif isinstance(string, self.__class__): string = string.string elif not isinstance(string, self.__class__): raise Exceptions.FormatError(f"Can not assign object {self.__class__} & {string.__class__}.") self.string = str(string) return self # return raw data. def raw(self): return self.str # # the boolean object class. class Boolean(object): def __init__(self, # the boolean's value (bool) (#1). boolean=False, # the path (str, FilePath) (param #2). path=False, # load the data on initialization. load=False, # the default array (will be created if file path does not exist). default=None, ): # docs. DOCS = { "module":"Boolean", "initialized":False, "description":[], "chapter": "Defaults", } # check self instance. if isinstance(boolean, Formats.Boolean): boolean = boolean.bool # init. self.bool = boolean if self.bool in ["true", "True", "TRUE", True]: self.bool = True else: self.bool = False # path. if path == False: self.file_path = self.fp = None # used in local memory (not fysical) else: self.file_path = self.fp = Formats.FilePath(path) if default != None and not Files.exists(self.file_path.path): self.save(array=default) if load: self.load() # def save(self, bool=None, path=None, sudo=False): if bool != None: bool = self.bool if path == None: path = self.file_path.path utils.__check_memory_only__(path) self.bool = bool return Files.save(path, str(bool), format="str", sudo=sudo) def load(self, default=None, sudo=False): utils.__check_memory_only__(self.file_path.path) if not os.path.exists(self.file_path.path) and default != None: self.save(default, sudo=sudo) self.bool = Files.load(self.file_path.path, format="str", sudo=sudo) return self.bool def string(self, true="True", false="False"): if self.bool: return true else: return false # native support. def __index__(self): return int(self) # support '==' & '!=' operator. def __eq__(self, boolean): if isinstance(boolean, bool): return self.bool == boolean elif not isinstance(boolean, self.__class__): return False return self.bool == boolean.bool def __ne__(self, boolean): if isinstance(boolean, bool): return self.bool != boolean elif not isinstance(boolean, self.__class__): return True return self.bool != boolean.bool # support default iteration. def __iter__(self): return iter(str(self.bool)) # support 'in' operator. def __contains__(self, string): return string in str(self.bool) # # representation. def __repr__(self): return str(self) # # str representation. 
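# ----------------------------------------------------------------------
# Added usage sketch (not part of the original source): the file-backed
# pattern Boolean(path=...) uses above -- the flag is persisted as the
# literal text "True"/"False" and parsed back on load. Standalone,
# stdlib only; the path below is hypothetical.
import os, tempfile

def save_flag(path, value):
    with open(path, "w") as f:
        f.write(str(bool(value)))            # stored as "True" / "False"

def load_flag(path):
    with open(path) as f:
        return f.read().strip() in ("true", "True", "TRUE")

_flag_path = os.path.join(tempfile.gettempdir(), "demo_flag")
save_flag(_flag_path, True)
assert load_flag(_flag_path) is True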
def __str__(self): return str(self.bool) # int representation. def __int__(self): if self.bool: return 1 else: return 0 # float representation. def __float__(self): if self.bool: return 1.0 else: return 0.0 # bool representation. def __bool__(self): return self.bool # object id. def __id__(self): return f"({self.instance()}:{str(self)})" # object instance. def instance(self): return "Boolean" # @property def __name__(self): return self.instance() # support self assignment. def assign(self, boolean): if isinstance(boolean, (int, float)): a=1 elif isinstance(value, self.__class__): boolean = boolean.bool elif not isinstance(boolean, self.__class__): raise Exceptions.FormatError(f"Can not assign object {self.__class__} & {boolean.__class__}.") self.bool = boolean return self # return raw data. def raw(self): return self.bool # # the integer object class. class Integer(object): def __init__(self, # the integers value (int, float) (param #1). value=0, # the path (str, FilePath) (param #2). path=False, # the integer format (str) (param #3). format="auto", # load the data on initialization. load=False, # the default array (will be created if file path does not exist). default=None, ): # docs. DOCS = { "module":"Integer", "initialized":False, "description":[], "chapter": "Defaults", } # check self instance. if isinstance(value, Formats.Integer): if "." in str(value): value = value.float else: value = value.int # init. if "." in str(value): self.format = "float" self.value = float(value) else: self.format = "int" self.value = int(value) self.int = int(value) self.float = float(value) # path. if path == False: self.file_path = self.fp = None # used in local memory (not fysical) else: self.file_path = self.fp = Formats.FilePath(path) if default != None and not Files.exists(self.file_path.path): self.save(array=default) if load: self.load() # def save(self, data=None, path=None, sudo=False): if data != None: data = self.raw() if path == None: path = self.file_path.path utils.__check_memory_only__(path) if data != self.raw(): self.assign(data) return Files.save(path, str(data), format="str", sudo=sudo) def load(self, default=None, sudo=False): utils.__check_memory_only__(self.file_path.path) if not os.path.exists(self.file_path.path) and default != None: self.save(default, sudo=sudo) data = Files.load(self.file_path.path, format="str", sudo=sudo) self.assign(data) return data def increase_version(self): # version 1. # old_version = self.value base, _base_= [], old_version.split(".") increase = True for i in _base_: base.append(int(i)) count = len(base)-1 for i in range(len(base)): if increase: if base[count] >= 9: if count > 0: base[count-1] += 1 base[count] = 0 increase = False else: base[count] += 1 break else: base[count] += 1 break else: if count > 0 and int(base[count]) >= 10: base[count-1] += 1 base[count] = 0 increase = False elif count == 0: break count -= 1 version = "" for i in base: if version == "": version = str(i) else: version += "."+str(i) return version def round(self, decimals): """ Returns a value rounded down to a specific number of decimal places. """ if not isinstance(decimals, int): raise TypeError("decimal places must be an integer") else: return round(self.value, decimals) def round_down(self, decimals): """ Returns a value rounded down to a specific number of decimal places. 
""" if not isinstance(decimals, int): raise TypeError("decimal places must be an integer") elif decimals < 0: raise ValueError("decimal places has to be 0 or more") elif decimals == 0: return math.ceil(self.value) factor = 10 ** decimals return math.floor(self.value * factor) / factor # def generate(self, length=6): return utils.generate.pincode(length=length) # # int format. def __index__(self): return self.value # support "+, -, *, %, @, /, //, **" . def __add__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not add object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value + value) def __sub__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not sub object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value - value) def __iadd__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not add object {self.__class__} & {value.__class__}.") self.value += value return self def __isub__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not sub object {self.__class__} & {value.__class__}.") self.value -= value return self def __mod__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not mod object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value % value) def __mul__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not mul object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value * value) def __pow__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not mul object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value ** value) def __div__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not mul object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value / value) def __truediv__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not mul object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value / value) def __floordiv__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not mul object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value // value) def __concat__(self, value): if isinstance(value, (int, float)): a=1 elif 
isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not mul object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value + value) # support "+=" & "-=". def __pos__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not mul object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value + value) def __matmul__(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not matmul object {self.__class__} & {value.__class__}.") return Formats.Integer(self.value @ value) # support //. #def __floordiv__(a, b) # return a // b. # support default iteration. def __iter__(self): return iter(str(self.value)) # support '>=' & '>' operator. def __gt__(self, integer): if isinstance(integer, (int,float)): integer = integer elif not isinstance(integer, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {integer.__class__}.") else: integer = integer.value return self.value > integer def __ge__(self, integer): if isinstance(integer, (int,float)): integer = integer elif not isinstance(integer, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {integer.__class__}.") else: integer = integer.value return self.value >= integer # support '<=' & '<' operator. def __lt__(self, integer): if isinstance(integer, (int,float)): integer = integer elif not isinstance(integer, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {integer.__class__}.") else: integer = integer.value return self.value < integer def __le__(self, integer): if isinstance(integer, (int,float)): integer = integer elif not isinstance(integer, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {integer.__class__}.") else: integer = integer.value return self.value <= integer # support '==' & '!=' operator. def __eq__(self, integer): if isinstance(integer, (int,float)): return self.value == integer elif not isinstance(integer, self.__class__): return False return self.value == integer.value def __ne__(self, integer): if isinstance(integer, (int,float)): return self.value != integer elif not isinstance(integer, self.__class__): return True return self.value != integer.value # support 'in' operator. def __contains__(self, integer): if isinstance(integer, (list, Files.Array)): for i in integer: if str(integer) in str(self.value): return True return False else: return str(value) in str(self.value) # # representation. def __repr__(self): return str(self) # # str representation. def __str__(self): return str(self.value) # int representation. def __int__(self): return self.int # float representation. def __float__(self): if self.format == "float": return self.float else: return float(self.int) # bool representation. def __bool__(self): if self.value in [1.0, 1]: return True elif self.value in [0, 0.0]: return False else: raise Exceptions.FormatError(f"Could not parse a bool from {self.__id__()}.") # content count. def __len__(self): return len(str(self.value)) # object id. def __id__(self): return f"({self.instance()}:{str(self)})" # object instance. def instance(self): return "Integer" # # support self assignment. 
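# ----------------------------------------------------------------------
# Added sketch (not part of the original source): the closure property
# the arithmetic overloads above aim for -- every operation returns the
# wrapper type again, so chained arithmetic never decays to a plain int.
# `Num` is a hypothetical minimal stand-in for Formats.Integer.
class Num:
    def __init__(self, value): self.value = value
    def __add__(self, other):
        other = other.value if isinstance(other, Num) else other
        return Num(self.value + other)       # stays a Num, not an int
    def __mul__(self, other):
        other = other.value if isinstance(other, Num) else other
        return Num(self.value * other)

n = (Num(2) + 3) * Num(4)
assert isinstance(n, Num) and n.value == 20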
def assign(self, value): if isinstance(value, (int, float)): a=1 elif isinstance(value, self.__class__): value = value.value elif not isinstance(value, self.__class__): raise Exceptions.FormatError(f"Can not assign object {self.__class__} & {value.__class__}.") self.value = value return self # return raw data. def raw(self): return self.value # # the date object class. class Date(object): def __init__(self, # # Leave all parameters None to initialize a Date() object with the current date. # Pass another Date object, str repr or timestamp in seconds to initialize a Date object from that timestamp. # # the date parameter (str, int, Date) (optional) (#1). date=None, # the format for the date (leave None to parse the date format automatically) (str). format=None, ): # docs. DOCS = { "module":"Date", "initialized":False, "description":[], "chapter": "Defaults", } # formats. self.default_format = "%d-%m-%y %H:%M:%S" # is Date() str repr self.seconds_format = '%S' self.minute_format = '%M' self.hour_format = '%H' self.day_format = '%d' self.day_name_format = '%A' self.week_format = '%V' self.month_format = '%m' self.month_name_format = '%h' self.year_format = '%Y' self.date_format = '%d-%m-%y' self.timestamp_format = '%d-%m-%y %H:%M' self.shell_timestamp_format = '%d_%m_%y-%H_%M' self.seconds_timestamp_format = '%d-%m-%y %H:%M:%S' self.shell_seconds_timestamp_format = '%d_%m_%y-%H_%M_%S' self.formats = [ self.shell_seconds_timestamp_format, self.seconds_timestamp_format, self.shell_timestamp_format, self.timestamp_format, self.date_format, self.year_format, self.seconds_format, self.minute_format, self.hour_format, self.day_format, self.day_name_format, self.week_format, self.month_format, self.month_name_format, ] # assign if date == None: self.initialize() else: self.assign(date, format=format) # def initialize(self, # # Leave all parameters None to initialize a Date() object with the current date. # # Initialize a future / previous date. # option 1: # specify the timestamp to initialize a previous / future date (format required). timestamp=None, # the timestamp format (leave None to parse). format=None, # options 2: # initialize by seconds. seconds=None, # option 3: # define the datetime object. datetime_obj=None, ): # defaults. #self.__class__.__name__ = "Date" # by datetime_obj if datetime_obj != None: seconds = time.mktime(datetime_obj.timetuple()) today = datetime.fromtimestamp(float(seconds)) # by timestamp & format. elif timestamp != None: if format == None: format = self.parse_format(timestamp) if format == None: raise Exceptions.ParseError(f"Unable to parse the date format from timestamp [{timestamp}]. Find out what the required format is and request a commit that updates the Date().parse_format() function with the required format (https://github.com/vandenberghinc/dev0s/).") seconds = time.mktime(datetime.strptime(str(timestamp), str(format)).timetuple()) today = datetime.fromtimestamp(float(seconds)) # by seconds. elif seconds != None: today = datetime.fromtimestamp(float(seconds)) # by current. else: today = datetime.today() # attributes. 
self.seconds = str(today.strftime(self.seconds_format)) self.minute = str(today.strftime(self.minute_format)) self.hour = str(today.strftime(self.hour_format)) self.day = str(today.strftime(self.day_format)) self.day_name = str(today.strftime(self.day_name_format)) self.week = str(today.strftime(self.week_format)) self.month = str(today.strftime(self.month_format)) self.month_name = str(today.strftime(self.month_name_format)) self.year = str(today.strftime(self.year_format)) self.date = str(today.strftime(self.date_format)) self.timestamp = str(today.strftime(self.timestamp_format)) self.shell_timestamp = str(today.strftime(self.shell_timestamp_format)) self.seconds_timestamp = str(today.strftime(self.seconds_timestamp_format)) self.shell_seconds_timestamp = str(today.strftime(self.shell_seconds_timestamp_format)) self.time = self.hour + ":" + self.minute return self def compare(self, comparison=None, current=None, format=None): if current == None: current = str(self) if isinstance(comparison, Formats.Date): comparison = str(comparison) if isinstance(current, Formats.Date): current = str(current) if format == None: comparison_format = self.parse_format(comparison) if comparison_format == None: raise Exceptions.ParseError(f"Unable to parse the date format from comparison [{comparison}].") else: comparison_format = format comparison = self.to_seconds(comparison, format=comparison_format) if format == None: current_format = self.parse_format(current) if current_format == None: raise Exceptions.ParseError(f"Unable to parse the date format from current [{current}].") else: current_format = format current = self.to_seconds(current, format=current_format) if comparison >= current: return "future" elif comparison <= current: return "past" elif comparison == current: return "present" else: raise ValueError(f"Unexpected error, comparison seconds: {comparison} current seconds: {current}.") def increase(self, string=None, weeks=0, days=0, hours=0, minutes=0, seconds=0, format=None): if string == None: string = str(self) if isinstance(string, Formats.Date): string = str(string) if format == None: format = self.parse_format(string) if format == None: raise Exceptions.ParseError(f"Unable to parse the date format from string [{string}].") seconds += 60*minutes seconds += 3600*hours seconds += 3600*24*days seconds += 3600*24*7*weeks s = self.to_seconds(string, format=format) s += seconds return self.from_seconds(s, format=format) def decrease(self, string=None, weeks=0, days=0, hours=0, minutes=0, seconds=0, format=None): if string == None: string = str(self) if isinstance(string, Formats.Date): string = str(string) if format == None: format = self.parse_format(string) if format == None: raise Exceptions.ParseError(f"Unable to parse the date format from string [{string}].") seconds += 60*minutes seconds += 3600*hours seconds += 3600*24*days seconds += 3600*24*7*weeks s = self.to_seconds(string, format=format) s -= seconds return self.from_seconds(s, format=format) def to_seconds(self, string=None, format=None): if string == None: string = str(self) if isinstance(string, Formats.Date): string = str(string) if format == None: format = self.default_format return time.mktime(datetime.strptime(str(string), str(format)).timetuple()) # def from_seconds(self, seconds, format=None): if isinstance(seconds, (str,String,Integer)): seconds = float(seconds) if format == None: format = self.default_format return Date(datetime.fromtimestamp(float(seconds)).strftime(format)) # def convert(self, string=None, 
datetime_obj=None, input=None, output="%Y%m%d"): if datetime_obj == None: if string == None: string = str(self) if isinstance(string, Formats.Date): string = str(string) if input == None: input = self.parse_format(string) datetime_obj = datetime.strptime(str(string), str(input)) return datetime_obj.strftime(str(output)) def parse_format(self, string): if isinstance(string, Formats.Date): return self.default_format elif isinstance(string, (int,float,Integer)): return self.seconds_format formats = [] if "-" in str(string): formats += [ self.shell_seconds_timestamp_format, self.seconds_timestamp_format, self.shell_timestamp_format, self.timestamp_format, self.date_format, ] else: formats += [ self.year_format, self.seconds_format, #self.minute_format, #self.hour_format, #self.day_format, #self.day_name_format, #self.week_format, #self.month_format, #self.month_name_format, ] # plus some custom formats. formats += [ "%d-%m-%y %H:%M.%S", # old default. "%Y-%m-%d %H:%M:%S", # stock market "%d-%m-%Y", # dd-mm-yyyy. "%d-%m-%y %H:%M:%S", # dd-mm-yy hh:mm:ss. "%d-%m-%Y %H:%M:%S", # dd-mm-yyyy hh:mm:ss. "%Y-%m-%dT%H:%M:%SZ", # rfc-3339. "%Y-%m-%d", ] for format in formats: try: datetime.strptime(str(string), str(format)) return format except Exception as e: a=1 return None def assign(self, string, format=None): if isinstance(string, Formats.Date): self = string return self else: if format == None: format = self.parse_format(string) if format == None: raise Exceptions.ParseError(f"Unable to parse a Date() object from string [{string}].") if format == self.seconds_format: self.initialize(seconds=float(string)) else: self.initialize(timestamp=string, format=format) return self # normalize seconds to 10s or 1m etc. def normalize_seconds(self, seconds:(int,float)): if seconds < 0: raise ValueError("Can not normalize negative seconds.") if seconds < 0.01: return f'{int(seconds*1000)}ms' elif seconds <= 60: return f'{int(seconds)}s' elif seconds <= 60*60: return f'{round(seconds/60,1)}m' elif seconds <= 60*60*24: return f'{round(seconds/(60*60),1)}h' elif seconds <= 60*60*24*30: return f'{round(seconds/(60*60*24),1)}d' elif seconds <= 60*60*24*30*12: return f'{round(seconds/(60*60*24*30),1)}m' else: return f'{round(seconds/(60*60*24*30*12),1)}y' # convert to datetime object. def datetime(self, timestamp=None): # set defaults. if timestamp == None: timestamp = str(self) # parse format. seconds = isinstance(timestamp, (int, float)) # by timestamp & format. if not seconds: format = self.parse_format(timestamp) if format == None: raise Exceptions.ParseError(f"Unable to parse the date format from timestamp [{timestamp}]. Find out what the required format is and request a commit that updates the Date().parse_format() function with the required format (https://github.com/vandenberghinc/dev0s/).") seconds = time.mktime(datetime.strptime(str(timestamp), str(format)).timetuple()) return datetime.fromtimestamp(float(seconds)) # by seconds. else: return datetime.fromtimestamp(float(seconds)) # convert to rfc_3339 format. def rfc_3339(self, timestamp=None): # convert. return self.datetime(timestamp=timestamp).isoformat('T') + "Z" # # convert to utc format. def utc(self, timestamp=None): # convert. return self.datetime(timestamp=timestamp).replace(tzinfo=timezone.utc) # # support default iteration. def __iter__(self): return iter([self.year, self.month, self.week, self.hour, self.minutes, self.seconds]) # support '>=' & '>' operator. 
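# ----------------------------------------------------------------------
# Added usage sketch (not part of the original source): the
# try-each-candidate strategy parse_format() uses above, plus the
# epoch-seconds model behind increase()/decrease() -- parse to seconds,
# apply the offset, format back. Standalone; the candidate list is
# illustrative, not the library's full list.
import time
from datetime import datetime

CANDIDATE_FORMATS = ["%d-%m-%y %H:%M:%S", "%Y-%m-%d %H:%M:%S", "%d-%m-%Y", "%Y-%m-%d"]

def parse_format(timestamp):
    # return the first format strptime accepts, else None.
    for fmt in CANDIDATE_FORMATS:
        try:
            datetime.strptime(str(timestamp), fmt)
            return fmt
        except ValueError:
            continue
    return None

def shift(timestamp, seconds):
    # to epoch seconds, add the offset, and back to the same format.
    fmt = parse_format(timestamp)
    epoch = time.mktime(datetime.strptime(timestamp, fmt).timetuple())
    return datetime.fromtimestamp(epoch + seconds).strftime(fmt)

assert parse_format("2021-06-01 12:30:00") == "%Y-%m-%d %H:%M:%S"
assert shift("01-01-21 12:00:00", 3600) == "01-01-21 13:00:00"  # assumes no DST jump in the hour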
    def __gt__(self, date):
        if not isinstance(date, self.__class__):
            raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {date.__class__}.")
        return float(self) > float(date)
    def __ge__(self, date):
        if not isinstance(date, self.__class__):
            raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {date.__class__}.")
        return float(self) >= float(date)
    # support '<=' & '<' operator.
    def __lt__(self, date):
        if not isinstance(date, self.__class__):
            raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {date.__class__}.")
        return float(self) < float(date)
    def __le__(self, date):
        if not isinstance(date, self.__class__):
            raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {date.__class__}.")
        return float(self) <= float(date)
    # support '==' & '!=' operator.
    def __eq__(self, date):
        if not isinstance(date, self.__class__):
            return False
        return float(self) == float(date)
    def __ne__(self, date):
        if not isinstance(date, self.__class__):
            return True
        return float(self) != float(date)
    # support 'in' operator.
    def __contains__(self, string):
        if isinstance(string, (list, Files.Array)):
            for i in string:
                if i in str(self):
                    return True
            return False
        else:
            return string in str(self)
    # support "+", "-", "+=" & "-=".
    def __add__(self, add):
        if isinstance(add, (int, float)):
            add = float(add)
        elif isinstance(add, self.__class__):
            add = add.to_seconds()
        else:
            # bug fix: the original tested the undefined name [array] here.
            raise Exceptions.FormatError(f"Can not add object {self.__class__} & {add.__class__}.")
        return Date(self.to_seconds() + add)
    def __iadd__(self, add):
        if isinstance(add, (int, float)):
            add = float(add)
        elif isinstance(add, self.__class__):
            add = add.to_seconds()
        else:
            raise Exceptions.FormatError(f"Can not iadd object {self.__class__} & {add.__class__}.")
        self = Date(self.to_seconds() + add)
        return self
    def __sub__(self, add):
        if isinstance(add, (int, float)):
            add = float(add)
        elif isinstance(add, self.__class__):
            add = add.to_seconds()
        else:
            raise Exceptions.FormatError(f"Can not sub object {self.__class__} & {add.__class__}.")
        return Date(self.to_seconds() - add)
    def __isub__(self, add):
        if isinstance(add, (int, float)):
            add = float(add)
        elif isinstance(add, self.__class__):
            add = add.to_seconds()
        else:
            raise Exceptions.FormatError(f"Can not isub object {self.__class__} & {add.__class__}.")
        self = Date(self.to_seconds() - add)
        return self
    # support +.
    def __concat__(self, add):
        if isinstance(add, (int, float)):
            add = float(add)
        elif isinstance(add, self.__class__):
            add = add.to_seconds()
        else:
            raise Exceptions.FormatError(f"Can not concat object {self.__class__} & {add.__class__}.")
        # bug fix: concatenation should add the offset, not subtract it.
        return Date(self.to_seconds() + add)
    # representation.
    def __repr__(self):
        return str(self)
    #
    # int representation.
    def __int__(self):
        return int(self.to_seconds(self.seconds_timestamp, format=self.seconds_timestamp_format))
    # float representation.
    def __float__(self):
        return float(self.to_seconds(self.seconds_timestamp, format=self.seconds_timestamp_format))
    # str representation.
    def __str__(self):
        return str(self.seconds_timestamp)
    # content count.
    def __len__(self):
        return len(self.seconds_timestamp)
    # object id.
    def __id__(self):
        return f"({self.instance()}:{str(self)})"
    # object instance.
    def instance(self):
        return "Date"
    #

#
# the files class.
class Files():
    #
    # functions.
def join(path=None, name=None, type=""): if type not in ["", "/"] and "." not in type: type = "." + type path = str(path) if os.path.exists(path) and Files.directory(path) and path[len(path)-1] != "/": path += '/' return gfp.clean("{}{}{}".format(path, name, type), remove_double_slash=True, remove_last_slash=False) def load(path, data="not to be used", format="str", raw=False, sudo=False): # keep data as second param to prevent save load errors. # correct format. if format in [str, String, "String", "string", "file"]: format = "str" if format in [dict, Dictionary, "Dictionary", "dict", "array", "Array"]: format = "json" if format in [bytes, Bytes, "Bytes"]: format = "bytes" #format = str(format) # match format. path = str(path) data = None # sudo. if sudo: data = utils.__execute__(["sudo", "cat", path]) if "No such file or directory" in data: raise FileNotFoundError(f"File [{path}] does not exist.") # proceed. if format == "str": if not sudo: file = open(path,mode='rb') data = file.read().decode() file.close() elif format == "json": if not sudo: try: with open(path, 'r+') as json_file: data = json.load(json_file) except json.decoder.JSONDecodeError as e: try: data = ast.literal_eval(Files.load(path=path, format="str", raw=True, sudo=sudo)) except: e = f"Unable to decode file [{path}] (sudo: {sudo}), error: {e}." raise Exceptions.JSONDecodeError(e) else: try: data = json.loads(data) except json.decoder.JSONDecodeError as e: try: data = ast.literal_eval(Files.load(path=path, format="str", raw=True, sudo=sudo)) except: e = f"Unable to decode file [{path}] (sudo: {sudo}), error: {e}." raise Exceptions.JSONDecodeError(e) elif format == "bytes": if not sudo: with open(path, "rb") as file: data = file.read() else: data = data.encode() else: raise ValueError(f"Unknown format {format}.") if raw: return data else: return Formats.initialize(data) def save( # the path (str) (#1). path, # the data (str, dict, list) (#2). data, # the file format, options: [str, bytes, json]. format="str", # root permission required. sudo=False, # json options. indent=4, ensure_ascii=False, # create backups. backups=False, # warning: safe True keeps infinitely trying to save the doc when an KeyboardInterrupt is raised by the user. safe=True, # system functions. __loader__=None, __checks__=True, __keyboard_interrupt__=False, __attempt__=1, __real_path__=None, ): if __checks__: # correct format. if format in [str, String, "String", "string", "file"]: format = "str" if format in [dict, Dictionary, "Dictionary", "dict", "array"]: format = "json" if format in [bytes, Bytes, "Bytes"]: format = "bytes" #format = str(format) # match format. 
path = gfp.clean(str(path), remove_double_slash=True, remove_last_slash=False) if sudo: __real_path__ = str(path) tmp_path = path = f"/tmp/{String().generate(length=12)}" data = Formats.denitialize(data) if path == None: raise Exceptions.InvalidUsage("Define parameter: path.") path = str(path) if format == "str": file = open(path, "w+") file.write(data) file.close() elif format == "json": if __checks__: try: test = json.dumps(data) except: raise Exceptions.JSONDecodeError(f"Unable to dump expected json data: {data}") try: with open(path, 'w+') as json_file: json.dump(data, json_file, ensure_ascii=ensure_ascii, indent=indent) except PermissionError: with open(path, 'w') as json_file: json.dump(data, json_file, ensure_ascii=ensure_ascii, indent=indent) except KeyboardInterrupt as e: if __loader__ == None: __loader__ = console.Loader(f"&RED&Do not interrupt!&END& Saving file [{path}] (attempt: {__attempt__}).") if __attempt__ >= 100: __loader__.stop(success=False) raise KeyboardInterrupt(e) return Files.save( path, data, format=format, sudo=sudo, indent=indent, ensure_ascii=ensure_ascii, backups=False, safe=safe, __loader__=__loader__, __checks__=False, __keyboard_interrupt__=str(e), __attempt__=__attempt__+1, __real_path__=__real_path__,) elif format == "bytes": with open(path, "wb") as file: file.write(data) else: raise ValueError(f"Unknown format {format}.") if sudo: if Files.directory(path) and path[len(path)-1] != "/": path += "/" if __real_path__[len(__real_path__)-1] != "/": __real_path__ += "/" os.system(f"sudo rsync -aq {gfp.clean(path)} {gfp.clean(__real_path__)} && rm -fr {tmp_path}") #print(f"sudo mv {gfp.clean(path)} {gfp.clean(__real_path__}") #os.system(f"sudo mv {gfp.clean(path)} {gfp.clean(__real_path__}") # os.system(f"sudo rsync -aq {gfp.clean(path)} {gfp.clean(__real_path__} && rm -fr {tmp_path}") #else: # os.system(f"sudo rsync -ogq {gfp.clean(path)} {gfp.clean(__real_path__} && rm -fr {tmp_path}") if __keyboard_interrupt__ != False: if __loader__ != None: __loader__.stop() raise KeyboardInterrupt(__keyboard_interrupt__) def delete( # the path (param #1). path=None, # root permission required. sudo=False, # forced mode. forced=False, # hide logs. silent=False, ): if path == None: raise Exceptions.InvalidUsage("Define parameter: path.") path = str(path) return gfp.delete(path=path, forced=forced, sudo=sudo, silent=silent) def chmod( # the path (param #1). path=None, # the new permission. permission=None, # recursive for entire dir. recursive=False, # root permission required. sudo=False, ): if path == None: raise Exceptions.InvalidUsage("Define parameter: path.") if permission == None: raise Exceptions.InvalidUsage("Define parameter: permission.") path = str(path) return gfp.permission.set(path=path, permission=permission, recursive=recursive, sudo=sudo) def chown( # the path (param #1). path=None, # the new owner. owner=None, # the new group (optional). group=None, # recursive for entire dir. recursive=False, # root permission required. sudo=False, ): if path == None: raise Exceptions.InvalidUsage("Define parameter: path.") if owner == None: raise Exceptions.InvalidUsage("Define parameter: owner.") path = str(path) return gfp.ownership.set(path=path, owner=owner, group=group, recursive=recursive, sudo=sudo) def exists(path=None, sudo=False): if path == None: raise Exceptions.InvalidUsage("Define parameter: path.") return gfp.exists(path=path, sudo=sudo) # def clean( # the path (leave None to use self.path) (param #1). path=None, # the clean options. 
remove_double_slash=True, remove_first_slash=False, remove_last_slash=False, ensure_first_slash=False, ensure_last_slash=False, ): if path == None: raise ValueError("Define parameter: path.") path = str(path).replace("~",HOME) while True: if remove_double_slash and "//" in path: path = path.replace("//","/") elif remove_first_slash and len(path) > 0 and path[0] == "/": path = path[1:] elif remove_last_slash and len(path) > 0 and path[len(path)-1] == "/": path = path[:-1] elif ensure_first_slash and len(path) > 0 and path[0] != "/": path = "/"+path elif ensure_last_slash and len(path) > 0 and path[len(path)-1] != "/": path += "/" else: break return path def directory( # the path (#1). path=None, # root permission required. sudo=False, ): if path == None: raise Exceptions.InvalidUsage("Define parameter: path.") path = Files.clean(path=path, remove_double_slash=True, remove_last_slash=True) path = str(path) return os.path.isdir(path) # def mounted( # the path (#1). path=None, ): if path == None: raise Exceptions.InvalidUsage("Define parameter: path.") path = gfp.clean(path=path, remove_double_slash=True, remove_last_slash=True) path = str(path) return os.path.ismount(path) # def create( # the path to the file (str) (required) (#1). path=None, # the data (str) (optional). data=None, # path is directory (bool). directory=False, # the owner (str) (optional). owner=None, # the group (str) (optional). group=None, # the permission (int) (optional). permission=None, # root permission required. sudo=False, ): if path == None: raise Exceptions.InvalidUsage("Define parameter: path.") elif Files.exists(path, sudo=sudo): Exceptions.DuplicateError(f"Path [{path}] already exists.") sudo_str = Boolean(sudo).string(true="sudo ", false="") if directory: os.system(f"{sudo_str}mkdir -p {path}") else: if isinstance(data, (list, Array, dict, Dictionary)): if isinstance(data, (Dictionary,Array)): data = data.raw() Files.save(path=path, data=data, format="json", sudo=sudo, ) else: Files.save(path=path, data=str(data), sudo=sudo) if not Files.exists(path, sudo=sudo): raise ValueError(f"Unable to create {Boolean(directory).string(true='directory', false='file')} [{path}] (sudo: {sudo}).") if permission != None: Files.chmod(path=path, permission=permission, sudo=sudo) if owner != None: Files.chown(path=path, owner=owner, group=group, sudo=sudo) def copy( # the from & to path (#1 & #2). from_, to_, # root permission required. sudo=False, # the active log level. log_level=0, # the exclude patterns. exclude=[], # update deleted files. delete=True, ): if not Files.exists(from_, sudo=sudo): raise FileNotFoundError(f"Specified copy path [{from_}] does not exist.") directory = False if Files.directory(from_, sudo=sudo): directory = True from_ += "/" to_ += "/" from_ = gfp.clean(from_) to_ = gfp.clean(to_) if not Files.exists(gfp.base(to_), sudo=sudo): Files.create(gfp.base(to_), sudo=sudo, directory=directory) exclude_str = "" for i in exclude: exclude_str += f" --exclude '{i}'" os.system(f"{Boolean(sudo).string(true='sudo ', false='')}rsync -azt{Boolean(log_level >= 1).string(true='P',false='')} {from_} {to_} {Boolean(delete).string(true='--delete', false='')}{exclude_str}") def move( # the from & to path (#1 & #2). from_, to_, # root permission required. sudo=False, # root permission required. 
log_level=0, ): if not Files.exists(from_, sudo=sudo): raise FileNotFoundError(f"Specified move path [{from_}] does not exist.") directory = False if Files.directory(from_, sudo=sudo): directory = True from_ += "/" to_ += "/" from_ = gfp.clean(from_) to_ = gfp.clean(to_) if not Files.exists(gfp.base(to_), sudo=sudo): Files.create(gfp.base(to_), sudo=sudo, directory=directory) os.system(f"{Boolean(sudo).string(true='sudo ', false='')}mv {from_} {to_}") def base( # the path (str, FilePath) (#1). path=None, # the dirs back. back=1, ): if path == None: raise ValueError("Define parameter: path:str.") path = str(path) base = path.replace('//','/') if base[len(base)-1] == '/': base = base[:-1] if len(base.split("/")) <= 1: raise ValueError("Path [{}] has no base.".format(base)) startslash = True if base[0] != "/": startslash = False base = base.split("/") m, c, s = len(base), 0, "" for i in base: if c >= m-back: break if c == 0: s = f"/{i}/" else: s += f"{i}/" c += 1 if startslash: return s else: return s[1:] # # # the file object class. class File(object): def __init__(self, path=None, data=None, load=False, default=None): # docs. DOCS = { "module":"File", "initialized":False, "description":[], "chapter": "Defaults", } # check self instance. if isinstance(data, Files.File): data = data.data # init. if path == False: self.file_path = self.fp = None # used in local memory (not fysical) else: self.file_path = self.fp = Formats.FilePath(path) self.data = data if default != None and not os.path.exists(self.file_path.path): self.save(data=default) if load: self.load() # can be filled with executing [self.x = x()]: def load(self, default=None, sudo=False): utils.__check_memory_only__(str(self.file_path.path)) if not os.path.exists(str(self.file_path.path)) and default != None: self.save(data=default, sudo=sudo) self.data = Files.load(self.file_path.path, format=str, sudo=sudo) return self.data def load_line(self, line_number, default=None, sudo=False): utils.__check_memory_only__(self.file_path.path) if not os.path.exists(self.file_path.path) and default != None: self.save(str(default), self.file_path.path, sudo=sudo) data = Files.load(self.file_path.path, format=str, sudo=sudo) return data.split('\n')[line_number] def save(self, data=None, path=None, overwrite_duplicates=True, sudo=False): if path == None: path = self.file_path.path if data == None: data = self.data utils.__check_memory_only__(path) if overwrite_duplicates: self.data = data return Files.save(path, data, sudo=sudo) else: file_name, original_path = Formats.FilePath(path).name(), path extension = file_name.split('.')[file_name.count('.')] file_name_without_extension = file_name.replace(extension, '') while True: if not os.path.exists(path): break else: path = original_path.replace(file_name, file_name_without_extension+'-'+str(index)+extension) self.data = data return Files.save(path, data, sudo=sudo) def check(self, default=None, save=True): if default != None and isinstance(default, (str, String)): if not self.fp.exists(): self.data = default if save: self.save(data=default) # support default iteration. def __iter__(self): return iter(self.data) # support '>=' & '>' operator. 
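# ----------------------------------------------------------------------
# Added sketch (not part of the original source): what the
# overwrite_duplicates=False branch of File.save() above intends -- note
# that the original loop references an undefined [index] and never
# increments it. Probe "name-1.ext", "name-2.ext", ... until a free path
# is found. Standalone and hypothetical.
import os

def non_clobbering_path(path):
    if not os.path.exists(path):
        return path
    base, ext = os.path.splitext(path)
    index = 1
    while os.path.exists(f"{base}-{index}{ext}"):
        index += 1
    return f"{base}-{index}{ext}"

# usage: open(non_clobbering_path("report.txt"), "w") never overwrites.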
def __gt__(self, string): if not isinstance(string, str): return len(self) > len(string) elif not isinstance(string, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.") return len(self) > len(string.data) def __ge__(self, string): if not isinstance(string, str): return len(self) >= len(string) elif not isinstance(string, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.") return len(self) >= len(string.data) # support '<=' & '<' operator. def __lt__(self, string): if not isinstance(string, str): return len(self) < len(string) elif not isinstance(string, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.") return len(self) < len(string.data) def __le__(self, string): if not isinstance(string, str): return len(self) <= len(string) elif not isinstance(string, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {string.__class__}.") return len(self) <= len(string.data) # support '==' & '!=' operator. def __eq__(self, string): if not isinstance(string, str): return self.data == string elif not isinstance(string, self.__class__): return False return self.data == string.data def __ne__(self, string): if not isinstance(string, str): return self.data != string elif not isinstance(string, self.__class__): return True return self.data != string.data # support 'in' operator. def __contains__(self, key): if isinstance(key, (list, Files.Array)): for i in key: if i in self.data: return True return False else: return key in self.data # str representation. def __str__(self): return str(self.data) # content count. def __len__(self): return len(self.data) # object id. def __id__(self): return f"({self.instance()}:{str(self)})" # object instance. def instance(self): return "File" # # support self assignment. def assign(self, data): if isinstance(data, self.__class__): data = data.data self.data = data return self # return raw data. def raw(self): return self.data # # the array object class. class Array(object): def __init__(self, # the array (param #1). array=[], # the path (param #2). path=False, # load the data on initialization. load=False, # the default array (will be created if file path does not exist). default=None, ): # docs. DOCS = { "module":"Array", "initialized":False, "description":[], "chapter": "Defaults", } # check self instance. if isinstance(array, Files.Array): array = array.array elif not isinstance(array, list): raise Exceptions.InstanceError(f"Parameter [{self.__class__.__name__}.array] must be a [Array] or [list], not [{array.__class__.__name__}].") # initialize dictionary recursive. #new = [] #for i in array: new.append(Formats.initialize(i)) #array = new #if isinstance(array, Array): # array = array.array # init. if path in [False, None]: self.file_path = self.fp = None # used in local memory (not fysical) self.__path__ = None else: self.file_path = self.fp = Formats.FilePath(path) self.__path__ = self.file_path.path self.array = array if default != None and self.file_path != None and not os.path.exists(self.file_path.path): self.save(array=default) self.array = default if load: self.load() # # save to file. 
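# ----------------------------------------------------------------------
# Added usage sketch (not part of the original source): the JSON-file
# persistence pattern that Array.save()/load() below implement -- the
# list is dumped as indented JSON and parsed back. Standalone; the path
# is hypothetical.
import json, os, tempfile

def save_list(path, array):
    with open(path, "w") as f:
        json.dump(array, f, indent=4, ensure_ascii=False)

def load_list(path):
    with open(path) as f:
        return json.load(f)

_list_path = os.path.join(tempfile.gettempdir(), "demo_array.json")
save_list(_list_path, ["a", "b", 3])
assert load_list(_list_path) == ["a", "b", 3]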
    def save(self, array=None, path=None, ensure_ascii=False, indent=4, sudo=False):
        # bug fix: fall back to self.array when no array is passed.
        if array == None: array = self.array
        if path == None: path = self.file_path.path
        utils.__check_memory_only__(path)
        self.array = array
        return Files.save(path, Formats.denitialize(array), format="json", indent=indent, ensure_ascii=ensure_ascii, sudo=sudo)
    # load from file.
    def load(self, default=None, sudo=False):
        utils.__check_memory_only__(self.file_path.path)
        if not os.path.exists(self.file_path.path) and default != None:
            self.save(default, sudo=sudo)
        self.array = Files.load(self.file_path.path, format="json", sudo=sudo)
        return self.array
    # convert to string.
    def string(self, joiner=" ", sum_first=False):
        string = ""
        for x in self.array:
            if sum_first and string == "":
                string = joiner + str(x)
            elif string == "":
                string = str(x)
            else:
                string += joiner + str(x)
        return str(string)
    # divide into several arrays.
    def divide(self, into=2):
        avg = len(self.array) / float(into)
        out, last = [], 0.0
        while last < len(self.array):
            out.append(self.array[int(last):int(last + avg)])
            last += avg
        if len(out) > into:
            while len(out) > into:
                last = out.pop(len(out) - 1)
                out[len(out) - 1] += last
        return out
    # remove indexes or values.
    def remove(self, indexes=[], values=[]):
        array = self.array
        for i in indexes:
            try:
                array.pop(i)
            except:
                pass
        if values != []:
            new = []
            for v in array:
                if v not in values:
                    new.append(v)
            array = new
        return Array(array, path=self.__path__)
    # default list functions.
    def append(self, var):
        # bug fix: list.append returns None; append first, then wrap the list.
        array = list(self.array)
        array.append(var)
        return Array(array, path=self.__path__)
    def pop(self, index):
        # bug fix: list.pop returns the popped item; pop first, then wrap the list.
        array = list(self.array)
        array.pop(index)
        return Array(array, path=self.__path__)
    def count(self, item=None):
        if item == None:
            return Formats.Integer(len(self.array))
        elif isinstance(item, (str, Formats.String)):
            c = 0
            for i in self:
                if i == item: c += 1
            return Formats.Integer(c)
        elif isinstance(item, (list, Files.Array)):
            c = 0
            for x in self:
                for y in item:
                    if x == y: c += 1
            return Formats.Integer(c)
        else:
            raise Exceptions.InstanceError("Parameter [item] must either be None, String or Array.")
    # check.
    def check(self, default=None, save=True):
        if default != None and isinstance(default, (list, Array)):
            if not self.fp.exists():
                self.array = default
                # bug fix: Array.save takes [array], not [data].
                if save: self.save(array=default)
            else:
                for i in default:
                    if i not in self.array:
                        self.array.append(i)
                if save: self.save()
    # clean content.
    def clean(self,
        # the string replacements.
        # example:
        #   { "Hello":"hello" }
        #   [ ["Hello", "hello"] ]
        replacements={},
        # the first characters to remove (String & Array).
        remove_first=[],
        # the last characters to remove (String & Array).
        remove_last=[],
        # the first characters that are ensured (String & Array) (List: check if one of the list is ensured).
        ensure_first=[],
        # the last characters that are ensured (String & Array) (List: check if one of the list is ensured).
        ensure_last=[],
        # remove all values within the list from the array.
        remove_values=[],
        # update the self array.
        update=True,
        # the array (leave None to use self.array).
array=None, ): if array == None: array = list(self.array) if isinstance(remove_first, (str, Formats.String)): remove_first = [remove_first] if isinstance(remove_last, (str, Formats.String)): remove_last = [remove_last] if isinstance(ensure_first, (str, Formats.String)): ensure_first = [ensure_first] if isinstance(ensure_last, (str, Formats.String)): ensure_last = [ensure_last] new = [] for item in list(array): if item not in remove_values: while True: edits = False for i in remove_first: if len(item) >= len(i) and item[:len(i)] == i: item = item[len(i):] edits = True for i in remove_last: if len(item) >= len(i) and item[len(i):] == i: item = item[:-len(i)] edits = True for i in ensure_first: if len(item) >= len(i) and item[:len(i)] != i: item = i+item edits = True for i in ensure_last: if len(item) >= len(i) and item[len(i):] != i: item += i edits = True for from_, to_ in replacements.items(): if isinstance(item, (str, Formats.String)) and from_ in item: item = item.replace(from_, to_) edits = True if not edits: break new.append(item) return Array(new, path=self.__path__) # iterations. def iterate(self, sorted=False, reversed=False, array=None): if array == None: array = list(self.array) return self.items(reversed=reversed, sorted=sorted, array=array) # iterate items. def items(self, sorted=False, reversed=False, array=None): if array == None: array = list(self.array) if sorted: array = self.sort(array=array) if reversed: return self.reversed(array=array) else: return Array(array, path=self.__path__) # reserse array. def reversed(self, array=None): if array == None: array = self.array reversed_keys = [] c = len(array)-1 for _ in range(len(array)): reversed_keys.append(array[c]) c -= 1 return Array(reversed_keys, path=self.__path__) # sort array. def sort(self, reversed=False, array=None): if array == None: array = self.array return Array(sorted(array, reverse=reversed), path=self.__path__) # dump json string. def json(self, sorted=False, reversed=False, indent=4, array=None, ): #return json.dumps(Formats.denitialize(self), indent=indent) if array == None: array = self.array return json.dumps(self.serialize(json=False, sorted=sorted, reversed=reversed, array=array), indent=indent) # serialize array. def serialize(self, sorted=False, reversed=False, json=False, array=None): if array == None: array = self.array if isinstance(array, Files.Array): array = array.array if sorted: items = self.items(reversed=reversed, array=self.sort(alphabetical=True, array=array)) else: items = self.items(reversed=reversed, array=array) new = [] for value in items: if isinstance(value, (dict, Files.Dictionary)): value = Files.Dictionary().serialize(json=json, sorted=sorted, reversed=reversed, dictionary=value) elif isinstance(value, (list, Files.Array)): value = self.serialize(json=json, sorted=sorted, reversed=reversed, array=value) elif isinstance(value, object): value = str(value) elif isinstance(value, str) or isinstance(value, bool) or value == None: if value in [True, "True", "True".lower()]: if json: value = "true" else: value = True elif value in [False, "False", "False".lower()]: if json: value = "false" else: value = False elif value in [None, "None", "None".lower()]: if json: value = "null" else: value = None new.append(value) return new # randomize the content of the array always non recursive. def randomize(self, # optionally pass the array (leave None to use self.array). 
        array=None,
    ):
        if array == None: array = list(self.array)
        randomized = []
        while len(array) > 0:
            index = random.randrange(0, len(array))
            randomized.append(array.pop(index))
        return Array(randomized, path=self.__path__)
    #
    # limit the content of the array.
    def limit(self,
        # limit to the number of samples.
        limit: int,
        # the index to start from.
        start=0,
        # optionally pass the array (leave None to use self.array).
        array=None,
    ):
        if array == None: array = list(self.array)
        return Array(array[start:start + limit], path=self.__path__)
    # min of numerical array.
    def min(self):
        min = self.array[0]
        for item in self.array:
            if item < min:
                min = item
        return min
    # max of numerical array.
    def max(self):
        max = self.array[0]
        for item in self.array:
            if item > max:
                max = item
        return max
    # sum of numerical array.
    def sum(self):
        return sum(self.array)
    # mean of numerical array.
    def mean(self, window=None):
        # note: parameter [window] is currently unused.
        return self.sum() / len(self.array)
    #
    # variance of numerical array.
    def variance(self):
        mean = self.mean()
        deviations = []
        for x in self.array:
            deviations.append((x - mean) ** 2)
        return sum(deviations) / len(self.array)
    # standard deviation of numerical array.
    def stdev(self):
        return math.sqrt(self.variance())
    # copy.
    def copy(self):
        return Files.Array(self.array, path=self.__path__)
    #
    # support "+", "-", "+=" & "-=".
    def __add__(self, array):
        if isinstance(array, self.__class__):
            array = array.array
        elif not isinstance(array, list):
            raise Exceptions.FormatError(f"Can not add object {self.__class__} & {array.__class__}.")
        return Array(self.array + array)
    def __iadd__(self, array):
        if isinstance(array, self.__class__):
            array = array.array
        elif not isinstance(array, list):
            raise Exceptions.FormatError(f"Can not add object {self.__class__} & {array.__class__}.")
        self.array += array
        # bug fix: in-place operators must return self.
        return self
    def __sub__(self, array):
        if isinstance(array, self.__class__):
            array = array.array
        elif not isinstance(array, list):
            raise Exceptions.FormatError(f"Can not sub object {self.__class__} & {array.__class__}.")
        new = []
        for i in self.array:
            if i not in array:
                new.append(i)
        return Array(new)
    def __isub__(self, array):
        if isinstance(array, self.__class__):
            array = array.array
        elif not isinstance(array, list):
            raise Exceptions.FormatError(f"Can not sub object {self.__class__} & {array.__class__}.")
        new = []
        for i in self.array:
            if i not in array:
                new.append(i)
        self.array = new
        # bug fix: in-place operators must return self.
        return self
    # support +.
    def __concat__(self, array):
        if isinstance(array, self.__class__):
            array = array.array
        elif not isinstance(array, list):
            raise Exceptions.FormatError(f"Can not concat object {self.__class__} & {array.__class__}.")
        return Array(self.array + array)
    # support default iteration.
    def __iter__(self):
        return iter(self.array)
    # support '>=' & '>' operator.
    def __gt__(self, array):
        if isinstance(array, self.__class__):
            array = array.array
        elif not isinstance(array, list):
            raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {array.__class__}.")
        return len(self.array) > len(array)
    def __ge__(self, array):
        if isinstance(array, self.__class__):
            array = array.array
        elif not isinstance(array, list):
            raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {array.__class__}.")
        return len(self.array) >= len(array)
    # support '<=' & '<' operator.
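# ----------------------------------------------------------------------
# Added check (not part of the original source): the statistics above
# compute the *population* variance (mean squared deviation), not the
# sample variance, and stdev is its square root. A standalone worked
# example:
import math

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
mean = sum(data) / len(data)                               # 5.0
variance = sum((x - mean) ** 2 for x in data) / len(data)  # 4.0 (divide by n, not n - 1)
assert math.sqrt(variance) == 2.0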
def __lt__(self, array): if not isinstance(array, list): return len(self.array) < len(array) elif not isinstance(array, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {array.__class__}.") return len(self.array) < len(array.array) def __le__(self, array): if not isinstance(array, list): return len(self.array) <= len(array) elif not isinstance(array, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {array.__class__}.") return len(self.array) <= len(array.array) # support '==' & '!=' operator. def __eq__(self, array): if not isinstance(array, list): return self.array == array elif not isinstance(array, self.__class__): return False return self.array == array.array def __ne__(self, array): if not isinstance(array, list): return self.array != array elif not isinstance(array, self.__class__): return True return self.array != array.array # support 'in' operator. def __contains__(self, key): if isinstance(key, (list, Files.Array)): for i in key: if i in self.array: return True return False else: return key in self.array # support '*' operator. def __mul__(self, value): if isinstance(value, int): a=1 else: raise Exceptions.FormatError(f"Can not mul object {self.__class__.__name__} & {value.__class__.__name__}.") return Array(self.array * value) # support '/' operator. def __div__(self, value): if isinstance(value, int): a=1 else: raise Exceptions.FormatError(f"Can not div object {self.__class__.__name__} & {value.__class__.__name__}.") return Array(self.divide(into=value)) # support item assignment. def __setitem__(self, index, value): #if "/" in item try: self.array[Formats.denitialize(index)] = value except IndexError: self.array.append(value) def __getitem__(self, index): return self.array[Formats.denitialize(index)] def __delitem__(self, index): #if "/" in item return self.array.pop(Formats.denitialize(index)) # representation. def __repr__(self): return str(self) # # str representation. def __str__(self): return str(Formats.denitialize(self.array)) # content count. def __len__(self): return len(self.array) # object id. def __id__(self): if len(self.array) > 10: return f"({self.instance()}:[{self.array[0]}, {self.array[0]}, {self.array[0]}, ... {self.array[len(self.array)-3]}, {self.array[len(self.array)-2]}, {self.array[len(self.array)-1]}])" else: return f"({self.instance()}:{str(self)})" # object instance. def instance(self): return "Array" # # support self assignment. def assign(self, array): if isinstance(array, self.__class__): array = array.array self.array = array # assign a new path. def assign_path(self, path): self.file_path = FilePath(path) self.__path__ = path # return raw data. def raw(self): return self.array # # the dictionary object class. class Dictionary(object): def __init__(self, # the dictionary (param #1). dictionary={}, # the file path (param #2). path=False, # load the file path dictionary on init. load=False, # specify default to check & create the dict. default=None, ): # docs. DOCS = { "module":"Dictionary", "initialized":False, "description":[], "chapter": "Defaults", } # check self instance. 
if isinstance(dictionary, Files.Dictionary): dictionary = dictionary.dictionary elif not isinstance(dictionary, dict): raise Exceptions.InstanceError(f"Parameter [{self.__class__.__name__}.dictionary] must be a [Dictionary] or [dict], not [{dictionary.__class__.__name__}].") """elif not isinstance(dictionary, dict): max_attempts = 2 for attempt in range(max_attempts): try: if 1+attempt == 1: dictionary = dictionary.dict() break elif 1+attempt == 2: dictionary = dictionary.json() break except: if 1+attempt >= max_attempts: raise Exceptions.InstanceError(f"Parameter [{self.__class__.__name__}.dictionary] must be a [Dictionary] or [dict], not [{dictionary.__class__.__name__}].") """ # initialize dictionary recursive. #for key in list(dictionary.keys()): # dictionary[key] = Formats.initialize(dictionary[key]) #if isinstance(dictionary, Dictionary): # dictionary = dictionary.dictionary # arguments. self.dictionary = dictionary self.path = gfp.clean(path=path) self.default = default self.file_path = self.fp = None self.__path__ = None # checks. if path not in [False, None]: self.file_path = self.fp = Formats.FilePath(path) self.__path__ = self.file_path.path if self.default != None: self.dictionary = self.check(default=self.default).dictionary if self.file_path != None and not self.file_path.exists(): self.save() if load: self.load() # # save to file. def save(self, dictionary=None, path=None, ensure_ascii=False, indent=4, sudo=False): utils.__check_memory_only__(self.file_path.path) if dictionary == None: dictionary = dict(self.dictionary) if path == None: path = self.file_path.path return Files.save(path, Formats.denitialize(dictionary), format="json", indent=indent, ensure_ascii=ensure_ascii, sudo=sudo) # load from file. def load(self, default=None, sudo=False): utils.__check_memory_only__(self.file_path.path) if not os.path.exists(self.file_path.path) and default != None: self.save(default, sudo=sudo) self.dictionary = Files.load(self.file_path.path, format="json", sudo=sudo) return self.dictionary # load a single line from file. def load_line(self, line_number, sudo=False): utils.__check_memory_only__(self.file_path.path) data = Files.load(str(self.file_path.path, sudo=sudo)) return data.split('\n')[line_number] # check the dictionary. def check(self, # Option 1: key=None, # check a certain key, it appends if not present value=None, # check a certain key, append the value if not present (no format check) # Option 2: default=None, # check based on a default dictionary, it appends it not present. # Optionals: dictionary=None, # overwrite the start dictionary, leave None to use self.dictionary. save=False, # saves the output & and sets the output to self.dictionary. ): # functions. def __iterate_dict__(dictionary, default): #print("\niterating new dictionary: [{}] & default [{}]\n".format(dictionary, default)) for identifier, item in default.items(): if isinstance(item, (dict,Dictionary)): try: dictionary[str(identifier)] = __iterate_dict__(dictionary[str(identifier)], item) except KeyError: dictionary[str(identifier)] = dict(item) elif isinstance(item, (list,Array)): if isinstance(item, (list)): item = list(item) elif isinstance(item, (Array)): item = item.array try: dictionary[str(identifier)] except KeyError: dictionary[str(identifier)] = item else: try: dictionary[str(identifier)] except KeyError: dictionary[str(identifier)] = item return dictionary # init. 
if dictionary == None: dictionary = dict(self.dictionary) if not isinstance(dictionary, (dict, Dictionary)): raise Exceptions.InvalidUsage(f"<Dictionary.check> parameter [dicionary] requires to be a [dict, Dictionary] not [{dictionary.__class__.__name__}].") # - option 1: if key == None and value != None: raise ValueError("Define both parameters: [key & value].") elif value == None and key != None: raise ValueError("Define both parameters: [key & value].") if key != None and value != None: try: dictionary[key] except KeyError: dictionary[key] = value return dictionary # - option 2: if default == None: default = self.default if default == None: raise ValueError("Define both parameters: [key & value] or parameter [default].") dictionary = __iterate_dict__(dictionary, default) return Dictionary(dictionary, path=self.__path__) # # divide dictionary into multiple arrays. def divide(self, into=2): return_list = [dict() for idx in range(into)] idx = 0 for k,v in self.dictionary.items(): return_list[idx][k] = v if idx < into-1: # indexes start at 0 idx += 1 else: idx = 0 return Array(return_list) # append to dict. def append(self, # by default it only overwrites if a key does not exist and sums the key if it is a str / int. # # a dictionary to append. dictionary, # the overwrite formats (add "*" for all). overwrite=[], # the sum formats (add "*" for all). sum=["int", "float"], # the banned dictionary keys. banned=[], # do not use. dictionary_=None, ): if dictionary_ == None: dictionary_ = dict(self.dictionary) if dictionary == dictionary_: return dictionary if dictionary_ == {}: return dictionary for key, value in dictionary.items(): if key not in banned: if isinstance(value, (dict, Dictionary)): found = True try: dictionary_[key] except: found = False if found: dictionary_[key] = self.append(value, overwrite=overwrite, sum=sum, banned=banned, dictionary_=dictionary_[key], save=False, update=False) else: dictionary_[key] = value else: format = value.__class__.__name__ if "*" in sum or format in sum: if format in ["str", "int", "float", "list", "Array"]: try: dictionary_[key] += value except KeyError: dictionary_[key] = value else: # cant be summed. dictionary_[key] = value elif "*" in overwrite or format in overwrite: dictionary_[key] = value else: try: dictionary_[key] except KeyError: dictionary_[key] = value return Dictionary(dictionary_, path=self.__path__) # edit. def edit(self, # the dictionary (leave None to use self.dictionary). dictionary=None, # the edits (dict). # adds / replaces the current (except the exceptions). edits={}, # the edits key Exceptions. exceptions=[], # the edits value Exceptions. value_exceptions=[None], # the instances to overwrite (list[str]) (missing stands for the keys that are missing in the dictionary). overwite=["missing"], # the instances to combine (list[str]) (dict is always recursive). combine=["int", "float", "Integer", "list", "Array"], # the log level. log_level=-1, ): def edit_dict(dictionary={}, edits={}): c = 0 for key, value in edits.items(): found = True try: dictionary[key] except KeyError: found = False # recursive. 
if key not in exceptions and value not in value_exceptions and isinstance(value, (dict, Dictionary)): if isinstance(value, (Dictionary)): value = value.dictionary if found: dictionary[key], lc = edit_dict(dictionary=dictionary[key], edits=value) c += lc else: if log_level >= 0: print(f"Editing {alias} config {key}: {value}.") dictionary[key] = value c += 1 elif key not in exceptions and value not in value_exceptions and not found and "missing" in overwrite: if log_level >= 0: print(f"Editing {alias} config {key}: {value}.") dictionary[key] = value c += 1 elif key not in exceptions and value not in value_exceptions and found and value.__class__.__name__ in combine: if log_level >= 0: print(f"Editing {alias} config {key}: {value}.") dictionary[key] = dictionary[key] + value c += 1 return dictionary, c # check specific. if dictionary == None: dictionary = dict(self.dictionary) dictionary, c = edit_dict(dictionary=dictionary, edits=edits) return Dictionary(dictionary, path=self.__path__) # unpack attribute(s). def unpack(self, # the key / keys / defaults parameter (#1). # str instance: # unpack the str key # list instance: # unpack all keys in the list. # dict instance: # unpack all keys from the dict & when not present return the key's value as default. keys, ): defaults_ = {} if isinstance(keys, (dict, Files.Dictionary)): if isinstance(keys, dict): defaults_ = dict(keys) keys = list(keys.keys()) else: defaults_ = keys.dict() keys = keys.keys() elif isinstance(keys, str): keys = [keys] unpacked = [] for key in keys: value, set = None, True try: value = self.dictionary[key] except KeyError: try: value = defaults_[key] except KeyError: set = False if not set: raise Exceptions.UnpackError(f"Dictionary does not contain attribute [{key}].") unpacked.append(value) if len(unpacked) == 1: return unpacked[0] else: return unpacked # remove. def remove(self, keys=[], values=[], update=True, save=False, dictionary=None): if dictionary == None: dictionary = dict(self.dictionary) for i in list(keys): try: del dictionary[i] except: a=1 if values != []: new = {} for k,v in dictionary.items(): if v not in values: new[k] = v dictionary = new return Dictionary(dictionary, path=self.__path__) # count keys or values. def count(self, item=None, values=False): if item == None: return Formats.Integer(len(self.dictionary)) elif isinstance(item, (str, Formats.String)): c, array = 0, [] if values: array = self.values() else: array = self.keys() for i in array: if i == item: c += 1 return Formats.Integer(c) elif isinstance(item, (list, Files.Array)): c, array = 0, [] if values: array = self.values() else: array = self.keys() for x in array: for y in item: if x == y: c += 1 return Formats.Integer(c) else: raise Exceptions.InstanceError(f"Parameter [item] must either be [None], [String] or [Array], not [{item.__class__}].") # insert new keys & values. def insert(self, dictionary={}, __dictionary__=None): if __dictionary__ == None: __dictionary__ = dict(self.dictionary) for key,value in dictionary.items(): if isinstance(value, (dict, Dictionary)): if key in __dictionary__: __dictionary__[key] = self.insert(value, __dictionary__=__dictionary__[key]) else: __dictionary__[key] = value elif isinstance(value, (list, Array)): if key in __dictionary__: for i in value: if i not in __dictionary__[key]: __dictionary__[key].append(i) else: __dictionary__[key] = value else: __dictionary__[key] = value return Dictionary(__dictionary__, path=self.__path__) # iterate keys and values. 
def iterate(self, sorted=False, reversed=False, dictionary=None): if dictionary == None: dictionary = self.dictionary return self.items(reversed=reversed, sorted=sorted, dictionary=dictionary) def items(self, sorted=False, reversed=False, dictionary=None): if dictionary == None: dictionary = self.dictionary if sorted: dictionary = self.sort(dictionary=dictionary) if reversed: return self.reversed(dictionary=dictionary).items() else: return dictionary.items() # iterate keys. def keys(self, dictionary=None): if dictionary == None: dictionary = self.dictionary return Array(list(dictionary.keys())) # iterate values. def values(self, dictionary=None): if dictionary == None: dictionary = dict(self.dictionary) values = [] for key, value in dictionary.items(): values.append(value) return Array(values) # reverse dictionary. def reversed(self, dictionary=None): if dictionary == None: dictionary = dict(self.dictionary) keys = list(dictionary.keys()) reversed_keys = [] c = len(keys)-1 for _ in range(len(keys)): reversed_keys.append(keys[c]) c -= 1 reversed_dict = {} for key in reversed_keys: reversed_dict[key] = dictionary[key] return Dictionary(reversed_dict, path=self.__path__) # sort ascending dictionary. def sort(self, # reverse ascending to descending. reversed=False, # sort the keys or sort the values. sort="keys", # system parameters. dictionary=None, ): if dictionary == None: dictionary = dict(self.dictionary) if sort == "values": new = {} for key in sorted(dictionary, key=dictionary.get, reverse=reversed): new[key] = dictionary[key] elif sort == "keys": new = {} for key in sorted(dictionary, reverse=reversed): new[key] = dictionary[key] else: raise ValueError(f"Selected an invalid sort mode [{sort}].") return Dictionary(new, path=self.__path__) # dump json string. def json(self, indent=4, dictionary=None, ): if dictionary == None: dictionary = self.dictionary return json.dumps(self.serialize(json=False, dictionary=dictionary), indent=indent) # serialize dict. def serialize(self, json=False, dictionary=None): if dictionary == None: dictionary = dict(self.dictionary) if isinstance(dictionary, Files.Dictionary): dictionary = dictionary.dictionary items = self.items(dictionary=dictionary) dictionary = {} for key, value in items: if isinstance(value, (dict, Files.Dictionary)): value = self.serialize(json=json, dictionary=value) elif isinstance(value, (list, Files.Array)): value = Files.Array(value).serialize(json=json) elif isinstance(value, object): value = str(value) elif isinstance(value, str) or isinstance(value, bool) or value == None: if value in [True, "True", "True".lower()]: if json: value = "true" else: value = True elif value in [False, "False", "False".lower()]: if json: value = "false" else: value = False elif value in [None, "None", "None".lower()]: if json: value = "null" else: value = None dictionary[key] = value return dictionary # copy. def copy(self): return Files.Dictionary(self.dictionary, path=self.__path__) # # system functions. def __reverse_keys_and_values__(self, dictionary=None): if dictionary == None: dictionary = self.dictionary new = {} for key,value in dictionary.items(): new[value] = key return new def __serialize_string__(self, string, banned_characters=["@"]): c, s, l = 0, "", False for char in string: if char not in banned_characters: # regular letter. if char.lower() == char: s += char.lower() l = False # capital letter. 
else: if c == 0: s += char.lower() else: if l: s += char.lower() else: s += "_"+char.lower() l = True c += 1 return s def __serialize_dictionary__(self, response): _response_ = {} for key,value in response.items(): s_key = self.__serialize_string__(key) if isinstance(value, dict): _response_[s_key] = self.__serialize_dictionary__(value) elif isinstance(value, str): try: integer = int(value) except: integer = False if integer != False: _response_[s_key] = integer elif value in ["false", "False", "FALSE", "DISABLED"]: _response_[s_key] = False elif value in ["true", "True", "TRUE", "ENABLED"]: _response_[s_key] = True else: _response_[s_key] = value else: _response_[s_key] = value return _response_ # support "+", -, =-, =+" . def __add__(self, dictionary): if isinstance(dictionary, dict): a=1 elif isinstance(dictionary, self.__class__): dictionary = dictionary.dictionary elif not isinstance(dictionary, self.__class__): raise Exceptions.FormatError(f"Can not add object {self.__class__} & {dictionary.__class__}.") return self.append(dictionary=dictionary, overwrite=["*"], sum=[]) def __iadd__(self, dictionary): if isinstance(dictionary, dict): a=1 elif isinstance(dictionary, self.__class__): dictionary = dictionary.dictionary elif not isinstance(dictionary, self.__class__): raise Exceptions.FormatError(f"Can not add object {self.__class__} & {dictionary.__class__}.") self.dictionary = self.append(dictionary=dictionary, overwrite=["*"], sum=[]).dictionary def __sub__(self, dictionary): if isinstance(dictionary, dict): keys = list(dictionary.keys()) elif isinstance(dictionary, list): keys = dictionary elif isinstance(dictionary, Files.Array): keys = dictionary.array elif isinstance(dictionary, self.__class__): keys = dictionary.keys() elif not isinstance(dictionary, self.__class__): raise Exceptions.FormatError(f"Can not add object {self.__class__} & {dictionary.__class__}.") return self.remove(keys=keys) def __isub__(self, dictionary): if isinstance(dictionary, dict): keys = list(dictionary.keys()) elif isinstance(dictionary, list): keys = dictionary elif isinstance(dictionary, Files.Array): keys = dictionary.array elif isinstance(dictionary, self.__class__): keys = dictionary.keys() elif not isinstance(dictionary, self.__class__): raise Exceptions.FormatError(f"Can not add object {self.__class__} & {dictionary.__class__}.") self.dictionary = self.remove(keys=keys, update=True).dictionary # support +. def __concat__(self, string): if isinstance(dictionary, dict): a=1 elif isinstance(dictionary, self.__class__): dictionary = dictionary.dictionary elif not isinstance(dictionary, self.__class__): raise Exceptions.FormatError(f"Can not add object {self.__class__} & {dictionary.__class__}.") return self.append(dictionary=dictionary, sum=[], overwrite=["*"]) # support default iteration. def __iter__(self): return iter(self.dictionary) # support '>=' & '>' operator. 
def __gt__(self, dictionary): if isinstance(dictionary, dict): return len(self.dictionary) > len(dictionary) elif not isinstance(dictionary, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {dictionary.__class__}.") return len(self.dictionary) > len(dictionary.dictionary) def __ge__(self, dictionary): if isinstance(dictionary, dict): return len(self.dictionary) >= len(dictionary) elif not isinstance(dictionary, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {dictionary.__class__}.") return len(self.dictionary) >= len(dictionary.dictionary) # support '<=' & '<' operator. def __lt__(self, dictionary): if isinstance(dictionary, dict): return len(self.dictionary) < len(dictionary) elif not isinstance(dictionary, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {dictionary.__class__}.") return len(self.dictionary) < len(dictionary.dictionary) def __le__(self, dictionary): if isinstance(dictionary, dict): return len(self.dictionary) <= len(dictionary) elif not isinstance(dictionary, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {dictionary.__class__}.") return len(self.dictionary) <= len(dictionary.dictionary) # support '==' & '!=' operator. def __eq__(self, dictionary): if isinstance(dictionary, dict): return str(self.sort()) == str(Dictionary(dictionary).sort()) elif isinstance(dictionary, Dictionary): return str(self.sort()) == str(dictionary.sort()) else: try: return str(self.sort()) == str(dictionary.sort()) except: return False def __ne__(self, dictionary): if isinstance(dictionary, dict): return str(self.sort()) != str(Dictionary(dictionary).sort()) elif isinstance(dictionary, Dictionary): return str(self.sort()) != str(dictionary.sort()) else: try: return str(self.sort()) != str(dictionary.sort()) except: return False # support 'in' operator. def __contains__(self, key): keys = list(self.dictionary.keys()) if isinstance(key, (list, Files.Array)): for i in key: if i in keys: return True return False else: return key in keys # support item assignment. def __setitem__(self, key, value): if isinstance(key, (int, Integer)): key = self.keys()[key] self.dictionary[Formats.denitialize(key)] = value def __getitem__(self, key): if isinstance(key, slice): raise ValueError("Coming soon.") elif isinstance(key, (int, Integer)): key = self.keys()[key] return self.dictionary[Formats.denitialize(key)] # def __delitem__(self, key): if isinstance(key, (int, Integer)): key = self.keys()[key] del self.dictionary[Formats.denitialize(key)] def __splitkey__(self, key): if key in self: return [key] return gfp.clean(path=key, remove_last_slash=True, remove_double_slash=True, remove_first_slash=True).split("/") # representation. def __repr__(self): return str(self) # # str representation. def __str__(self): return str(Formats.denitialize(self.dictionary)) # content count. def __len__(self): return len(self.dictionary) # object id. def __id__(self): return f"({self.instance()}:{str(self)})" # object instance. def instance(self, serialize=False): return "Dictionary" @property def __name__(self): return self.instance() # support self assignment. def assign(self, dictionary): if isinstance(dictionary, self.__class__): dictionary = dictionary.dictionary self.dictionary = dictionary # assign a new path. def assign_path(self, path): self.file_path = FilePath(path) self.__path__ = path # return raw data. 
def raw(self): return self.dictionary # # # the directory object class. class Directory(object): def __init__(self, # the dirs file path (param #1). path=None, # the hierarchy to check / create. hierarchy={}, # load the content. #load=False, # load recursive. #recursive=False, ): # docs. DOCS = { "module":"Directory", "initialized":False, "description":[], "chapter": "Defaults", } # check self instance. if isinstance(path, Files.Directory): path = path.fp.path # init. if path == False: self.file_path = self.fp = None # used in local memory (not fysical) else: if path[len(path)-1] != "/": path += "/" self.file_path = self.fp = Formats.FilePath(path) self.hierarchy = hierarchy if self.hierarchy != {}: self.check(hierarchy=hierarchy) # load. #self.content = {} #if load: # self.content = {} # can be filled with executing [self.x = x()]: # executable functions. # actions. def create(self, file_paths=[], path=None, sudo=False, owner=None, group=None, permission=None): # - init: if path == None: path = self.file_path.path # - create dir: if not os.path.exists(path): if sudo: os.system('sudo mkdir -p '+path) else: os.system('mkdir -p '+path) # - copy files: commands = [] for l_path in file_paths: if sudo: command = None if Files.directory(l_path): command = 'sudo cp -r {0} {1} '.format(l_path, path+Formats.FilePath(l_path).name()) else: command = 'sudo cp {0} {1}'.format(l_path, path+Formats.FilePath(l_path).name()) commands.append(command) else: command = None if Files.directory(l_path): command = 'cp -r {0} {1} '.format(l_path, path+Formats.FilePath(l_path).name()) else: command = 'cp {0} {1}'.format(l_path, path+Formats.FilePath(l_path).name()) commands.append(command) if len(commands) > 0: if sudo: script = Files.ShellScript( data=command, path='/tmp/shell_script-'+str(random.randrange(23984792,23427687323))+'.sh' ) script.save() script.setPermission(755) script.execute(sudo=sudo) script.delete() else: os.system(Files.Array(array=commands,path=False).string(joiner=" \n ")) if owner != None or group!=None: self.file_path.ownership.set(owner=owner, group=group, sudo=sudo) if permission != None: self.file_path.permission.set(permission=permission, sudo=sudo) def delete(self, forced=False): if forced: os.system('rm -fr {}'.format(self.file_path.path)) else: os.system('rm -r {}'.format(self.file_path.path)) def check(self, # Required: # - dictionary format: hierarchy=None, # Optionals: # - string format: owner=None, group=None, # - boolean format: sudo=False, # - integer format: permission=None, # (octal format) recursive=False, # for permission/ownership silent=False, ): format = { "my_directory_name":{ # Required: "path":"my_directory_name/", # Optionals: "permission":755, "owner":"daanvandenbergh", "group":None, "sudo":False, "directory":True, "recursive":False, # for permission & ownership (directories). 
"default_data":None, # makes it a file "default":None, # makes it a dictionary } } def checkPermissionOwnership(file_path, dictionary, silent=False, recursive=False): if dictionary["permission"] != None and dictionary["permission"] != file_path.permission.permission: #print("editing file [{}] permission [{}] to [{}]...".format(file_path.path, file_path.permission.permission, dictionary["permission"])) file_path.permission.set(permission=dictionary["permission"], sudo=dictionary["sudo"], recursive=recursive, silent=silent) if dictionary["owner"] != None and dictionary["owner"] != file_path.ownership.owner: #print("editing file [{}] owner [{}] to [{}]...".format(file_path.path, file_path.ownership.owner, dictionary["owner"])) file_path.ownership.set(owner=dictionary["owner"], group=file_path.ownership.group, sudo=dictionary["sudo"], recursive=recursive, silent=silent) #print("file [{}] current group [{}] wanted group [{}]".format(file_path.path, file_path.ownership.group, dictionary["group"])) if dictionary["group"] != None and dictionary["group"] != file_path.ownership.group: #print("editing file [{}] group [{}] to [{}]...".format(file_path.path, file_path.ownership.group, dictionary["group"])) file_path.ownership.set(owner=file_path.ownership.owner, group=dictionary["group"], sudo=dictionary["sudo"], recursive=recursive, silent=silent) if hierarchy == None: hierarchy = self.hierarchy #if owner == None: owner = self.owner #if group == None: group = self.group #if permission == None: permission = self.permission file_path = Formats.FilePath(self.file_path.path) if file_path.exists(sudo=sudo) == False: file_path.create( directory=True, permission=permission, group=group, owner=owner, sudo=sudo) elif group != None or owner != None or permission != None: file_path.permission.permission = file_path.permission.get() _owner_,_group_ = file_path.ownership.get() file_path.ownership.group = _group_ file_path.ownership.owner = _owner_ checkPermissionOwnership(file_path, {"sudo":sudo, "owner":owner, "group":group, "permission":permission}, recursive=recursive, silent=silent) if hierarchy == None: raise ValueError("Define dictionary parameter: hierarchy") for identifier, dictionary in hierarchy.items(): # - check: try: dictionary["path"] = self.file_path.path + dictionary["path"] except: raise ValueError("Invalid hierarchy item [{} : {}]. 
Specify the [path].".format(identifier, "?")) try: dictionary["permission"] except KeyError: dictionary["permission"] = None try: dictionary["owner"] except KeyError: dictionary["owner"] = None try: dictionary["group"] except KeyError: dictionary["group"] = None try: dictionary["directory"] except KeyError: dictionary["directory"] = False try: dictionary["sudo"] except KeyError: dictionary["sudo"] = False try: dictionary["default_data"] except KeyError: dictionary["default_data"] = None try: dictionary["default"] except KeyError: dictionary["default"] = None try: dictionary["recursive"] except KeyError: dictionary["recursive"] = False # - directory: if dictionary["directory"]: file_path = Formats.FilePath(dictionary["path"]) if file_path.exists(sudo=dictionary["sudo"]) == False: file_path.create( directory=True, permission=dictionary["permission"], group=dictionary["group"], owner=dictionary["owner"], sudo=dictionary["sudo"],) else: file_path.permission.permission = file_path.permission.get() _owner_,_group_ = file_path.ownership.get() file_path.ownership.group = _group_ file_path.ownership.owner = _owner_ #if 'back_up_requests/requests' in file_path.path: # print("file: {}, owner: {}, group: {}, permission: {}".format(file_path.path, file_path.ownership.owner, file_path.ownership.group, file_path.permission.permission)) checkPermissionOwnership(file_path, dictionary, silent=silent, recursive=dictionary["recursive"]) # - file: elif dictionary["default_data"] != None: file = Files.File(path=dictionary["path"]) if file.file_path.exists(sudo=dictionary["sudo"]) == False: file.file_path.create( data=dictionary["default_data"], permission=dictionary["permission"], group=dictionary["group"], owner=dictionary["owner"], sudo=dictionary["sudo"]) else: file.file_path.permission.permission = file_path.permission.get() _owner_,_group_ = file_path.ownership.get() file.file_path.ownership.group = _group_ file.file_path.ownership.owner = _owner_ checkPermissionOwnership(file.file_path, dictionary, silent=silent) # - dictionary: elif dictionary["default"] != None: file = Files.Dictionary(path=dictionary["path"]) if file.file_path.exists(sudo=dictionary["sudo"]) == False: file.save(dictionary["default"]) file.file_path.permission.check( permission=dictionary["permission"], sudo=dictionary["sudo"]) file.file_path.ownership.check( group=dictionary["group"], owner=dictionary["owner"], sudo=dictionary["sudo"]) else: file.file_path.permission.permission = file_path.permission.get() _owner_,_group_ = file_path.ownership.get() file.file_path.ownership.group = _group_ file.file_path.ownership.owner = _owner_ checkPermissionOwnership(file.file_path, dictionary, silent=silent) file.check(default=default, save=True) else: raise ValueError("Invalid hierarchy item [{} : {}]. Either [directory] must be enabled, or [default_data / default] must be specified.".format(identifier, dictionary["path"])) # # load & save sub paths. def load(self, path=None, format=str, default=None, sudo=False): return Files.load(path=self.fullpath(path), format=format, sudo=sudo) def save(self, path=None, data=None, format=str, sudo=False): return Files.save(path=self.fullpath(path), data=data, format=format, sudo=sudo) # returnable functions. def paths(self, # get recursively (bool). recursive=False, # get files only (bool). files_only=False, # get firs only (bool). dirs_only=False, # also get empty dirs (bool). empty_dirs=True, # the banned full paths (list). banned=[], # the banned names (list). 
banned_names=[".DS_Store"], # the banend base names (list). banned_basenames=["__pycache__"], # the allowed extensions (list). extensions=["*"], # the path (leave None to use self.path) (str, FilePath). path=None, ): if dirs_only and files_only: raise ValueError("Both parameters dirs_only & piles_only are True.") if path == None: path = self.file_path.path path = str(path) if not Files.exists(path): return [] if isinstance(extensions, str): extensions = [extensions] if len(banned) > 0: l_banned = [] for i in banned: l_banned.append(gfp.clean(f"{path}/{i}")) banned = l_banned paths = [] if recursive: # does only work with recursive. for root, dirs, files in os.walk(path): if not dirs_only: for name in files: if name not in banned_names and ("*" in extensions or gfp.extension(name=name) in extensions ): l_path = gfp.clean(path=f"{root}/{name}") l_banned = False for i in banned_basenames: if f"/{i}/" in l_path: l_banned = True ; break if l_path not in banned and not l_banned and l_path+"/" not in banned: paths.append(l_path) if not files_only: for name in dirs: if name not in banned_names and (dirs_only or "*" in extensions or "dir" in extensions ): l_path = gfp.clean(path=f"{root}/{name}/") l_banned = False for i in banned_basenames: if f"/{i}/" in l_path: l_banned = True ; break if l_path not in banned and not l_banned and l_path+"/" not in banned: paths.append(l_path) if recursive: paths += self.paths(recursive=recursive, path=l_path, dirs_only=dirs_only, files_only=files_only, banned=banned, banned_names=banned_names, empty_dirs=empty_dirs) else: for name in os.listdir(path): l_path = gfp.clean(path=f"{path}/{name}") if not dirs_only and not Files.directory(l_path): if name not in banned_names and ("*" in extensions or gfp.extension(name=name) in extensions ): l_banned = False for i in banned_basenames: if f"/{i}/" in l_path: l_banned = True ; break if l_path not in banned and not l_banned and l_path+"/" not in banned: paths.append(l_path) if not files_only and Files.directory(l_path): l_path += "/" if name not in banned_names and (dirs_only or "*" in extensions or "dir" in extensions ): l_banned = False for i in banned_basenames: if f"/{i}/" in l_path: l_banned = True ; break if l_path not in banned and not l_banned and l_path+"/" not in banned: paths.append(l_path) return paths def names(self, # get recursively (bool). recursive=False, # get files only (bool). files_only=False, # get firs only (bool). dirs_only=False, # also get empty dirs (bool). empty_dirs=True, # remove the extension names (bool). remove_extensions=False, # the banned full paths (list). banned=[], # the banned names (list). banned_names=[".DS_Store"], # the banend base names (list). banned_basenames=["__pycache__"], # the allowed extensions (list). extensions=["*"], # the path (leave None to use self.path) (str, FilePath). 
path=None, ): names = [] for _path_ in self.paths(dirs_only=dirs_only, files_only=files_only, empty_dirs=empty_dirs, recursive=recursive, path=path, banned=banned, banned_names=banned_names, extensions=extensions): if remove_extensions: name = gfp.name(path=_path_) names.append(name[:-len(gfp.extension(name=name))]) else: names.append(gfp.name(path=_path_)) return names def oldest(self): files = [] for i in os.listdir(self.file_path.path): if i not in [".DS_Store"]: path = f'{self.file_path.path}/{i}'.replace("//",'/') files.append(path) if len(files) == 0: return False return min(files, key=os.path.getctime) # oldest is min (this is not a code error) def newest(self): files = [] for i in os.listdir(self.file_path.path): if i not in [".DS_Store"]: path = f'{self.file_path.path}/{i}'.replace("//",'/') files.append(path) if len(files) == 0: return False return max(files, key=os.path.getctime) # newest is max (this is not a code error) def random(self): files = [] for i in os.listdir(self.file_path.path): if i not in [".DS_Store"]: path = f'{self.file_path.path}/{i}'.replace("//",'/') files.append(path) if len(files) == 0: return False return files[random.randrange(0, len(files))] def generate(self, length=24, type="/"): path, paths = None, self.paths() for x in range(1000): path = self.join(utils.generate.shell_string(length=length), type) if path not in paths: break if path == None: __error__("Failed to generate a new random path inside directory [{}].".format(self.file_path.path)) return path def structured_join(self, name, type="", structure="alphabetical", create_base=False, sudo=False, owner=None, group=None, permission=None): if type not in ["/", ""]: type = "."+type if structure == "alphabetical": alphabetical = None try: alphabetical = name[0].upper() except: alphabetical = "SPECIAL" if str(alphabetical) not in ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Z","0","1","2","3","4","5","6","7","8","9"]: aplhabetical = "SPECIAL" base = self.file_path.path + "/" + alphabetical + "/" if create_base and os.path.exists(base) == False: self.create(path=base, sudo=sudo, owner=owner, group=group, permission=permission) alph_dir = base + name + type return alph_dir else: raise ValueError("Invalid usage, parameter structure [{}], valid options: {}".format(structure, ["alphabetical"])) def contains(self, name=None, type="/", recursive=False): return self.join(name, type) in self.paths(recursive=recursive) # def subpath(self, fullpath): return self.fp.clean(path=fullpath.replace(self.fp.path, ""), remove_double_slash=True) def fullpath(self, subpath): return self.fp.clean(path=f"{self.fp.path}/{subpath}", remove_double_slash=True) # set the icon. def set_icon(self, # the path to the .png / .jpg icon. icon=None, # the directory path (leave None to use self.fp.path). path=None, ): if icon == None: raise Exceptions.InvalidUsage("Define parameter: icon.") if path == None: path = self.fp.path if OS in ["osx", "macos"]: utils.__execute_script__(f""" #!/bin/bash # settings. icon="{icon}" dest="{path}" # check inputs if [ ! -f $icon ]; then echo "ERROR: File $1 does not exists" exit 1 elif [[ ! 
$icon =~ .*\.(png|PNG|jpg|JPG) ]]; then echo "ERROR: Icon must be a .png|.jpg file" exit 1 elif [ -f $dest ]; then folder=false elif [ -d $dest ]; then folder=true else echo 'ERROR: File|Folder destination does not exists' exit 1 fi # create icns icon sips -i $icon > /dev/null DeRez -only icns $icon > /tmp/tmpicns.rsrc # set icon if [ "$folder" = true ]; then Rez -append /tmp/tmpicns.rsrc -o $dest$'/Icon\r' SetFile -a C $dest SetFile -a V $dest$'/Icon\r' else Rez -append /tmp/tmpicns.rsrc -o $dest SetFile -a C $dest fi # clean up rm /tmp/tmpicns.rsrc exit 0 """) else: raise OSError("Unsupported operating system.") # index the content. def index(self, # the wanted options. metrics=[], options=["size", "mtime", "content", "name", "basename", "extension", "mount", "directory"], # optional path (leave None to use self.path). path=None, ): def process(path): info = {} if "mtime" in metrics: info["mtime"] = gfp.mtime(path=path, format="seconds") if "size" in metrics: info["size"] = gfp.size(path=path, format=int) directory = None if "directory" in metcics: directory = info["directory"] = Files.directory(str(path)) if "content" in metrics: if directory == None: raise Exceptions.InvalidUsage("Metric [directory] is required when obtaining metric [content].") if not directory: info["content"] = Files.load(path) else: info["content"] = None if "mount" in metrics: info["mount"] = os.path.ismount(str(path)) if "name" in metrics: info["name"] = gfp.name(path=path) if "extension" in metrics: info["name"] = gfp.extension(path=path) if "basename" in metrics: info["basename"] = gfp.basename(path=path) return info # if path == None: path = self.path if metrics == []: raise Exceptions.InvalidUsage(f'No metrics are specified, metric options: [{Array(options).string(joiner=" ")}].') for i in metrics: if i not in options: raise Exceptions.InvalidUsage(f'Metric [{i}] is not a valid metric option, options: [{Array(options).string(joiner=" ")}].') indexed, dir, ids = Dictionary(path=False, dictionary={}), Files.Directory(path=path), [] for _path_ in dir.paths(recursive=True, files_only=True, banned=[gfp.clean(f"{path}/Icon\r")], banned_names=[".DS_Store", "__pycache__"]): if _path_ not in ids and "/__pycache__/" not in _path_ and "/.DS_Store" not in _path_: indexed[_path_] = process(_path_) ids.append(_path_) for _path_ in dir.paths(recursive=True, dirs_only=True, banned=[gfp.clean(f"{path}/Icon\r")], banned_names=[".DS_Store", "__pycache__"]): if _path_ not in ids and "/__pycache__/" not in _path_ and "/.DS_Store" not in _path_: indexed[_path_] = process(_path_) ids.append(_path_) return indexed.sort(alphabetical=True) # open for desktop. def open(self, path=None, sudo=False): if path == None: path = self.fp.path if sudo: sudo = "sudo " else: sudo = "" if OS in ["macos"]: os.system(f"{sudo}open {path}") elif OS in ["linux"]: os.system(f"{sudo}nautulis {path}") else: raise Exceptions.InvalidOperatingSystem(f"Unsupported operating system [{OS}].") # return references of each file that includes one of the matches. 
def find(self, matches:list, path=None, recursive=False, log_level=0): if path == None: path = self.path gfp = Formats.FilePath("") c, references = 0, {} for string in matches: if not os.path.exists(path): raise ValueError(f"Path {path} does not exist.") elif not Files.directory(path): raise ValueError(f"Path {path} is not a directory.") for i_path in self.paths(recursive=recursive, files_only=True, banned_names=[".DS_Store", ".git"], path=path): data = None try: data = Files.load(i_path) except: try: data = f"{Files.load(i_path, format=bytes)}" except: data = None if data != None and string in data: if log_level >= 0: print("") print(f"{i_path}:") lines, linecount = data.split("\n"), 0 for _ in lines: if string in lines[linecount]: try: before = lines[linecount-1] except: before = None try: after = lines[linecount+1] except: after = None if log_level >= 0: if before != None: print(" * "+before) print(" * "+lines[linecount]) if after != None: print(" * "+after) references[i_path] = lines[linecount] linecount += 1 c += 1 if log_level >= 0 and c > 0: print("") return references # replace str within all files. def replace(self, replacements:list, path=None, recursive=False, log_level=0): if path == None: path = self.path gfp = Formats.FilePath("") c, updates = 0, [] for from_, to in replacements: if not os.path.exists(path): raise ValueError(f"Path {path} does not exist.") elif not Files.directory(path): raise ValueError(f"Path {path} is not a directory.") for path in self.paths(recursive=recursive, banned_names=[".DS_Store", ".git"], path=path): if not Files.directory(path): try: data = Files.load(path) except UnicodeDecodeError: a=1 if from_ in data: if log_level >= 0: loader = console.Loader(f"Updating file {path}.") Files.save(path, data.replace(from_, to)) if log_level >= 0: loader.stop() updates.append(path) c += 1 return updates # filepath shortcuts. def join(self, name=None, type=""): return self.file_path.join(name, type) def name(self): return self.file_path.name() def base(self): return self.file_path.base() def basename(self): return self.file_path.basename() # support default iteration. def __iter__(self): return iter(self.paths()) # support '>=' & '>' operator. def __gt__(self, directory): if not isinstance(directory, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {directory.__class__}.") return len(self.paths()) > len(directory.paths()) def __ge__(self, directory): if not isinstance(directory, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {directory.__class__}.") return len(self.paths()) >= len(directory.paths()) # support '<=' & '<' operator. def __lt__(self, directory): if not isinstance(directory, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {directory.__class__}.") return len(self.paths()) < len(directory.paths()) def __le__(self, directory): if not isinstance(directory, self.__class__): raise Exceptions.FormatError(f"Can not compare object {self.__class__} & {directory.__class__}.") return len(self.paths()) <= len(directory.paths()) # support '==' & '!=' operator. def __eq__(self, directory): if not isinstance(directory, self.__class__): return False return len(self.paths()) == len(directory.paths()) def __ne__(self, directory): if not isinstance(directory, self.__class__): return True return len(self.paths()) != len(directory.paths()) # support 'in' operator. 
def __contains__(self, path): paths = self.paths() if isinstance(path, (list, Files.Array)): for i in path: if i in paths: return True return False else: return path in paths # representation. def __repr__(self): return str(self) # # system functions. def __str__(self): return str(self.fp.path) # content count. def __len__(self): return len(self.paths()) # object id. def __id__(self): return f"({self.instance()}:{str(self)})" # object instance. def instance(self): return "Directory" # @property def __name__(self): return self.instance() # return raw data. def raw(self): return self.fp.path # # the image object class. class Image(object): def __init__(self, path=None, image=None, load=False): # docs. DOCS = { "module":"Image", "initialized":False, "description":[], "chapter": "Defaults", } # init. if path == False: self.file_path = self.fp = None # used in local memory (not fysical) else: self.file_path = self.fp = Formats.FilePath(path) self.image = image if load: self.load() # def load(self, path=None): if path == None: path = self.file_path.path self.image = Image.open(path) def edit_pixel(self, pixel=[0, 0], new_pixel_tuple=None): pixel = self.image.load() pix[15, 15] = value self.image.save(self.file_path.path) def convert(self, # the input path (str, FilePath) (#1). output=None, # the input path (str, FilePath) (leave None to use self.fp.path) input=None, ): if input == None: input = self.fp.path if output == None: raise Exceptions.InvalidUsage("Define parameter: [output].") img = _Image_.open(str(input)) img.save(str(output)) print(f"Successfully converted image {input} to {output}.") def replace_pixels(self, input_path=None, output_path=None, input_hex=None, output_hex=None): img = _Image_.open(input_path) pixels = img.load() input_rgb, output_rgb = input_hex, output_hex # self.hex_to_rgb(input_hex), self.hex_to_rgb(output_hex) for i in range(img.size[0]): for j in range(img.size[1]): print(pixels[i,j], "VS", input_rgb) if pixels[i,j] == input_rgb: pixels[i,j] = output_rgb img.save(output_path) def replace_colors(self, input_path=None, output_path=None, hex=None): img = _Image_.open(input_path) pixels = img.load() rgb = hex #self.hex_to_rgb(hex) for i in range(img.size[0]): for j in range(img.size[1]): if pixels[i,j] != rgb and pixels[i,j] != (0, 0, 0, 0): pixels[i,j] = rgb img.save(output_path) def rgb_to_hex(self, tuple): return '#%02x%02x%02x' % tuple def hex_to_rgb(self, _hex_): return tuple(int(_hex_[i:i+2], 16) for i in (0, 2, 4)) # object id. def __id__(self): return f"({self.instance()}:{str(self)})" # object instance. def instance(self): return "Image" # @property def __name__(self): return self.instance() # return raw data. def raw(self): return self.fp.path # suport eq. def __eq__(self, var): if var.__class__.__name__ in ["NoneType"]: return False else: return str(var) == str(self) def __ne__(self, var): if var.__class__.__name__ in ["NoneType"]: return True else: return str(var) != str(self) # repr. def __str__(self): return str(self.fp) def __repr__(self): return str(self) # # # the zip object class. class Zip(object): def __init__(self, path=None, check=False): # docs. DOCS = { "module":"Zip", "initialized":False, "description":[], "chapter": "Defaults", } # init. self.file_path = self.fp = Formats.FilePath(path, check=check) # def create(self, # source can either be a string or an array. source=None, # remove the source file(s). remove=False, # sudo required to move/copy source files. sudo=False, ): # create tmp dir. 
name = self.file_path.name().replace('.encrypted.zip','').replace("."+self.file_path.extension(),'') tmp = Formats.FilePath(f'/tmp/zip-{utils.generate.shell_string(24)}') tmp_content = Formats.FilePath(tmp.join(name, "")) if tmp.exists(): tmp.delete(forced=True) if os.path.exists(tmp.path):os.system(f"rm -fr {tmp.path}") os.system(f"mkdir -p {tmp.path}") if isinstance(source, str): target = Formats.FilePath(source) name = target.name().replace('.encrypted.zip','').replace("."+target.extension(),'') if remove: target.move(tmp_content.path, sudo=sudo) else: target.copy(tmp_content.path, sudo=sudo) elif isinstance(source, list): tmp_content.create(directory=True) for path in source: file_path = Formats.FilePath(path) if remove: file_path.move("/"+tmp_content.join('/'+file_path.name(),"/"), sudo=sudo) else: file_path.copy("/"+tmp_content.join('/'+file_path.name(),"/"), sudo=sudo) else: raise ValueError("Parameter [source] must either be a str or list.") # write out zip. base = self.file_path.base() format = self.file_path.extension() archive_from = os.path.dirname(tmp_content.path) archive_to = os.path.basename(tmp_content.path.strip(os.sep)) zip_path = shutil.make_archive(name, format, archive_from, archive_to) os.system(f'mv {zip_path} {self.file_path.path}') tmp.delete(forced=True, sudo=sudo) # def extract(self, # the base extract directory. base=None, # remove the zip after extraction. remove=False, # if sudo required for removing file path. sudo=False,): # extract. if base == None: base = self.file_path.base() with zipfile.ZipFile(self.file_path.path, 'r') as zip_ref: zip_ref.extractall(base) if remove: self.file_path.delete(forced=True, sudo=sudo) # # representation. def __repr__(self): return str(self) # # system functions. def __str__(self): return self.fp.path # object id. def __id__(self): return f"({self.instance()}:{str(self)})" # object instance. def instance(self): return "Zip" # @property def __name__(self): return self.instance() # return raw data. def raw(self): return self.fp.path # # # the bytes object class. class Bytes(object): def __init__(self, # the bytes (param #1). data=b"", # the path (str, FilePath) (param #2). path=False, # load the data on initialization. load=False, # the default array (will be created if file path does not exist). default=None, ): # docs. DOCS = { "module":"Bytes", "initialized":False, "description":[], "chapter": "Defaults", } # check self instance. if isinstance(data, Files.Bytes): data = data.bytes # bytes. self.bytes = bytes # path. if path == False: self.file_path = self.fp = None # used in local memory (not fysical) else: self.file_path = self.fp = Formats.FilePath(path) if default != None and not Files.exists(self.file_path.path): self.save(array=default) if load: self.load() # def load(self, sudo=False): bytes = Files.load(self.file_path.path, format="bytes", sudo=sudo) self.bytes = bytes return bytes def save(self, bytes=None, sudo=False): if bytes == None: bytes = self.bytes bytes = Formats.denitialize(bytes) self.bytes = bytes return Files.save(self.fp.path, bytes, format="bytes", sudo=sudo) # suppor default iteration. def __iter__(self): return iter(self.bytes) # support '==' & '!=' operator. 
def __eq__(self, bytes_): if isinstance(bytes_, bytes): return self.bytes == bytes_ elif not isinstance(bytes_, self.__class__): return False return self.bytes == bytes_.bytes def __ne__(self, bytes_): if isinstance(bytes_, bytes): return self.bytes != bytes_ elif not isinstance(bytes_, self.__class__): return True return self.bytes != bytes_.bytes # support 'in' operator. def __contains__(self, bytes_): if isinstance(bytes_, (list, Files.Array)): for i in bytes_: if i == self.bytes: return True return False else: return bytes_ in self.bytes # # representation. def __repr__(self): return str(self) # # str representation. def __str__(self): return str(self.bytes) # content count. def __len__(self): return len(self.bytes) # object id. def __id__(self): return f"({self.instance()}:{str(self)})" # object instance. def instance(self): return "Bytes" # @property def __name__(self): return self.instance() # support self assignment. def assign(self, b): if isinstance(b, self.__class__): b = b.bytes self.bytes = b return self # return raw data. def raw(self): return self.bytes # # # # # some default classes. class Classes(): # the speed class. class Speed(): # the mark function, returns a timestamp used for calculation. def mark(): return time.time() # # calculate the difference between the marked timestamp & the current. def calculate( # the marked timestamp from Speed.mark. stamp, # the current timestamp (leave None to use Speed.mark) current=None, # round to decimals (Leave None to ignore). decimals=None, # normalize seconds. normalize=False, ): if current == None: current = Speed.mark() diff = current - stamp if decimals != None: diff = round(diff, decimals) if normalize: diff = Speed.normalize_seconds(diff) return diff # normalize seconds to 10s or 1m etc. def normalize_seconds(seconds:(int,float), decimals=1): if seconds < 0: raise ValueError("Can not normalize negative seconds.") if seconds < 0.01: return f'{int(seconds*1000)}ms' elif seconds <= 60: return f'{int(seconds)}s' elif seconds <= 60*60: return f'{round(seconds/60, decimals)}m' elif seconds <= 60*60*24: return f'{round(seconds/(60*60), decimals)}h' elif seconds <= 60*60*24*30: return f'{round(seconds/(60*60*24), decimals)}d' elif seconds <= 60*60*24*30*12: return f'{round(seconds/(60*60*24*30), decimals)}m' else: return f'{round(seconds/(60*60*24*30*12), decimals)}y' # some default objects. class Objects(): # the generate object class. class Generate(object): def __init__(self): # docs. DOCS = { "module":"Generate", "initialized":False, "description":[], "chapter": "Defaults", } # def int(self, length=6): charset = Array(Formats.digits).string(joiner="") return ''.join(random.choice(charset) for x in range(length)) # def string(self, length=6, capitalize=True, digits=True): charset = Array(Formats.alphabet).string(joiner="") if capitalize: charset += Array(Formats.capitalized_alphabet).string(joiner="") if digits: charset += Array(Formats.digits).string(joiner="") return ''.join(random.choice(charset) for x in range(length)) # # the interval object class. class Interval(object): def __init__(self, # the sleep time. sleeptime=1, # the timeout. timeout=60, ): # docs. DOCS = { "module":"Interval", "initialized":False, "description":[], "chapter": "Defaults", } # attributes. 
self.sleeptime = sleeptime self.timeout = timeout # def __int__(self): return int(self.sleeptime) def __iter__(self): l = [] for _ in range(int(self.timeout/self.sleeptime)): l.append(self) return iter(l) def sleep(self, chapters=1): for _ in range(chapters): time.sleep(int(self)/chapters) # #for interval in Interval(sleeptime=60, timeout=3600): # ... # interval.sleep() # # shortcuts. FilePath = Formats.FilePath String = Formats.String Boolean = Formats.Boolean Integer = Formats.Integer Date = Formats.Date File = Files.File Directory = Files.Directory Zip = Files.Zip Image = Files.Image Bytes = Files.Bytes Dictionary = Files.Dictionary Array = Files.Array Speed = Classes.Speed Generate = Objects.Generate Interval = Objects.Interval # initialized objects. gfp = Formats.FilePath("") # is required (do not remove). gd = gdate = Formats.Date() #
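The shortcuts above (Array, Dictionary, Speed, ...) are the module's public surface. A minimal usage sketch follows, based only on the method signatures defined above and not run against the library; the module name `files_module` is hypothetical.

# Hedged usage sketch -- module name `files_module` is hypothetical.
from files_module import Array, Dictionary, Speed

# Array: non-destructive helpers that return new Array objects.
a = Array([3, 1, 2])
print(a.sort().raw())        # [1, 2, 3]
print(a.reversed().raw())    # [2, 1, 3]
print(a.limit(2).raw())      # [3, 1]
print(a.min(), a.max(), a.sum(), a.mean())

# Dictionary: sorting, serialization and key unpacking.
d = Dictionary({"b": 2, "a": 1})
print(d.sort().json())       # keys sorted ascending, dumped as JSON
a_val, b_val = d.unpack(["a", "b"])

# Speed: mark a timestamp and report the normalized elapsed time.
stamp = Speed.mark()
print(Speed.calculate(stamp, normalize=True))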
2.6875
3
object_torus.py
KeerthanBhat/pygame-Search-the-Key
0
11546
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *


def main():
    pygame.init()
    glutInit()
    display = (800, 600)
    pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
    # Perspective projection, then pull the camera back from the origin.
    gluPerspective(45, (display[0] / display[1]), 0.1, 50.0)
    glTranslatef(0.0, 0.0, -5)
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        # Spin the torus one degree per frame around the y-axis.
        glRotatef(1, 0, 1, 0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glColor3f(0.0, 1.0, 0.0)
        glutWireTorus(0.2, 0.8, 50, 50)
        pygame.display.flip()
        pygame.time.wait(10)


main()
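If you want a solid rather than wireframe torus, you would typically also enable depth testing and a basic light; a hedged, untested sketch of the extra setup (parameter values illustrative):

# Sketch: one-time setup before the render loop (values illustrative).
glEnable(GL_DEPTH_TEST)        # hidden-surface removal for solid geometry
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glLightfv(GL_LIGHT0, GL_POSITION, (2.0, 2.0, 2.0, 1.0))
glEnable(GL_COLOR_MATERIAL)    # let glColor3f tint the lit surface
# ...then inside the loop, draw a solid torus instead of a wire one:
glutSolidTorus(0.2, 0.8, 50, 50)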
3.015625
3
samples/snippets/translate_v3_batch_translate_text_with_glossary_and_model.py
renovate-bot/python-translate
70
11547
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START translate_v3_batch_translate_text_with_glossary_and_model]
from google.cloud import translate


def batch_translate_text_with_glossary_and_model(
    input_uri="gs://YOUR_BUCKET_ID/path/to/your/file.txt",
    output_uri="gs://YOUR_BUCKET_ID/path/to/save/results/",
    project_id="YOUR_PROJECT_ID",
    model_id="YOUR_MODEL_ID",
    glossary_id="YOUR_GLOSSARY_ID",
):
    """Batch translate text with glossary and translation model."""

    client = translate.TranslationServiceClient()

    # Supported language codes: https://cloud.google.com/translate/docs/languages
    location = "us-central1"
    target_language_codes = ["ja"]
    gcs_source = {"input_uri": input_uri}

    # Optional. Can be "text/plain" or "text/html".
    mime_type = "text/plain"
    input_configs_element = {"gcs_source": gcs_source, "mime_type": mime_type}
    input_configs = [input_configs_element]
    gcs_destination = {"output_uri_prefix": output_uri}
    output_config = {"gcs_destination": gcs_destination}
    parent = f"projects/{project_id}/locations/{location}"

    model_path = "projects/{}/locations/{}/models/{}".format(
        project_id, "us-central1", model_id
    )
    models = {"ja": model_path}

    glossary_path = client.glossary_path(
        project_id, "us-central1", glossary_id  # The location of the glossary
    )
    glossary_config = translate.TranslateTextGlossaryConfig(glossary=glossary_path)
    glossaries = {"ja": glossary_config}  # target lang as key

    operation = client.batch_translate_text(
        request={
            "parent": parent,
            "source_language_code": "en",
            "target_language_codes": target_language_codes,
            "input_configs": input_configs,
            "output_config": output_config,
            "models": models,
            "glossaries": glossaries,
        }
    )

    print("Waiting for operation to complete...")
    response = operation.result()

    # Display the character totals for the batch.
    print("Total Characters: {}".format(response.total_characters))
    print("Translated Characters: {}".format(response.translated_characters))

# [END translate_v3_batch_translate_text_with_glossary_and_model]
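A hedged invocation sketch for the sample above; every ID below is a placeholder to be replaced with real GCS, project, AutoML model, and glossary resources.

# Placeholder resource IDs -- substitute real ones before running.
batch_translate_text_with_glossary_and_model(
    input_uri="gs://my-bucket/docs/input.txt",
    output_uri="gs://my-bucket/docs/output/",
    project_id="my-project",
    model_id="TRL1234567890",
    glossary_id="my-glossary",
)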
2.15625
2
movo_common/movo_third_party/executive_smach/smach_ros/test/concurrence.py
zkytony/kinova-movo
2
11548
#!/usr/bin/env python

import roslib; roslib.load_manifest('smach_ros')
import rospy
import rostest

import unittest

from actionlib import *
from actionlib.msg import *

from smach import *
from smach_ros import *

from smach_msgs.msg import *

# Static goals
g1 = TestGoal(1)  # This goal should succeed
g2 = TestGoal(2)  # This goal should abort
g3 = TestGoal(3)  # This goal should be rejected

### Custom state classes
class Setter(State):
    """State that sets the key 'a' in its userdata"""
    def __init__(self):
        State.__init__(self, ['done'], [], ['a'])
    def execute(self, ud):
        ud.a = 'A'
        rospy.loginfo("Added key 'a'.")
        return 'done'

class Getter(State):
    """State that grabs the key 'a' from userdata, and sets 'b'"""
    def __init__(self):
        State.__init__(self, ['done', 'preempted'], ['a'], ['b'])
    def execute(self, ud):
        while 'a' not in ud:
            rospy.loginfo("Waiting for key 'a' to appear.")
            rospy.sleep(0.1)
        ud.b = ud.a
        rospy.sleep(1.0)
        if self.preempt_requested():
            return 'preempted'
        return 'done'

### Test harness
class TestStateMachine(unittest.TestCase):
    def test_concurrence(self):
        """Test concurrent container."""
        sm = StateMachine(['done', 'succeeded'])
        with sm:
            cc = Concurrence(['succeeded', 'done'],
                             default_outcome='done',
                             outcome_map={'succeeded': {'SETTER': 'done'}})
            sm.add('CONCURRENT', cc)
            with cc:
                Concurrence.add('SETTER', Setter())
                Concurrence.add('GETTER', Getter())

        outcome = sm.execute()

        assert outcome == 'succeeded'
        assert 'a' in cc.userdata
        assert 'b' in cc.userdata
        assert cc.userdata.a == 'A'
        assert cc.userdata.b == 'A'

    def test_preempt(self):
        """Test concurrent container that preempts siblings."""
        cc = Concurrence(['succeeded', 'done'],
                         default_outcome='done',
                         child_termination_cb=lambda so: True,
                         outcome_map={'succeeded': {'SETTER': 'done',
                                                    'GETTER': 'preempted'}})
        with cc:
            Concurrence.add('SETTER', Setter())
            Concurrence.add('GETTER', Getter())

        outcome = cc.execute()

        assert outcome == 'succeeded'
        assert 'a' in cc.userdata
        assert 'b' in cc.userdata
        assert cc.userdata.a == 'A'
        assert cc.userdata.b == 'A'

    def test_no_preempt(self):
        """Test concurrent container that doesn't preempt siblings."""
        cc = Concurrence(['succeeded', 'done'],
                         default_outcome='done',
                         child_termination_cb=lambda so: False,
                         outcome_map={'succeeded': {'SETTER': 'done',
                                                    'GETTER': 'done'}})
        with cc:
            Concurrence.add('SETTER', Setter())
            Concurrence.add('GETTER', Getter())

        outcome = cc.execute()

        assert outcome == 'succeeded'
        assert 'a' in cc.userdata
        assert 'b' in cc.userdata
        assert cc.userdata.a == 'A'
        assert cc.userdata.b == 'A'

    def test_outcome_cb(self):
        """Test concurrent container with an outcome callback."""
        cc = Concurrence(['succeeded', 'done'],
                         default_outcome='done',
                         child_termination_cb=lambda so: False,
                         outcome_cb=lambda so: list(set(so.values()))[0])
        with cc:
            Concurrence.add('SETTER', Setter())
            Concurrence.add('GETTER', Getter())

        outcome = cc.execute()

        assert outcome == 'done'
        assert 'a' in cc.userdata
        assert 'b' in cc.userdata
        assert cc.userdata.a == 'A'
        assert cc.userdata.b == 'A'

def main():
    rospy.init_node('concurrence_test', log_level=rospy.DEBUG)
    rostest.rosrun('smach', 'concurrence_test', TestStateMachine)

if __name__ == "__main__":
    main()
1.914063
2
KaratAPP/models.py
MHuiG/Karat-Django-Backend
0
11549
from django.db import models

# Create your models here.

##########################################################################
# Vote
class Vote(models.Model):
    data = models.CharField(max_length=255)
##########################################################################
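# A minimal usage sketch (assumes the app is installed and migrations have
# been applied; "option-42" is an illustrative payload):
# Vote.objects.create(data="option-42")
# Vote.objects.count()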
2.421875
2
arguments.py
nudles/a2c
0
11550
import argparse

import torch


def get_args():
    parser = argparse.ArgumentParser(description='RL')
    parser.add_argument('--algo', default='a2c',
                        help='algorithm to use: a2c | ppo')
    parser.add_argument('--lr', type=float, default=7e-5,
                        help='learning rate (default: 7e-5)')
    parser.add_argument('--eps', type=float, default=1e-5,
                        help='RMSprop optimizer epsilon (default: 1e-5)')
    parser.add_argument('--alpha', type=float, default=0.99,
                        help='RMSprop optimizer alpha (default: 0.99)')
    parser.add_argument('--gamma', type=float, default=0.99,
                        help='discount factor for rewards (default: 0.99)')
    parser.add_argument('--max-grad-norm', type=float, default=0.5,
                        help='max norm of gradients (default: 0.5)')
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed (default: 1)')
    parser.add_argument('--num-processes', type=int, default=1,
                        help='how many training CPU processes to use (default: 1)')
    parser.add_argument('--num-steps', type=int, default=32,
                        help='number of forward steps in A2C (default: 32)')
    parser.add_argument('--clip-param', type=float, default=0.2,
                        help='clip parameter (default: 0.2)')
    parser.add_argument('--log-interval', type=int, default=50,
                        help='log interval, one log per n updates (default: 50)')
    parser.add_argument('--num-frames', type=int, default=80000,
                        help='number of frames to train (default: 80000)')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    parser.add_argument('--obs_size', type=int, default=200,
                        help='observation vector size')
    parser.add_argument('--cycle_len', type=int, default=500,
                        help='cycle length')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='whether to record the logfile')
    parser.add_argument('--num_models', type=int, default=3,
                        help='number of models to use')
    parser.add_argument('--beta', type=float, default=1,
                        help='balance the accuracy and latency when calculating the reward')
    parser.add_argument('--tau', type=float, default=2,
                        help='max waiting time for enqueue')
    parser.add_argument('--max_latency', type=float, default=16,
                        help='acceptable latency for each request')
    parser.add_argument('--policy', choices=['async', 'sync'], default='async',
                        help='policy')

    args = parser.parse_args()
    print("cuda: %s" % str(args.cuda))
    if args.cuda:
        assert torch.cuda.is_available(), 'CUDA is not available in this machine!'
    return args


if __name__ == '__main__':
    get_args()
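# A minimal parsing sketch (flag values below are illustrative, not tuned):
#   python arguments.py --algo a2c --num-steps 32 --cuda
# argparse converts hyphenated flags to underscored attributes:
# args = get_args()
# print(args.max_grad_norm, args.num_steps, args.cuda)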
2.46875
2
vega/algorithms/nas/__init__.py
wnov/vega
6
11551
from .backbone_nas import *
from .adelaide_ea import *
from .sr_ea import *
from .esr_ea import *
from .darts_cnn import *
from .cars import *
from .fis import *
from .auto_lane import *
from .mfkd import *
1
1
ai.py
s18mbbustorff/AI_Hanabi_Assignment
0
11552
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar  9 12:27:15 2021

@author: kuba
"""

import copy
import numpy as np

w = {"H0": {"H0": 0.2, "H1": 0.2, "P0": 0.5, "P1": 0.1},
     "H1": {"H0": 0.2, "H1": 0.2, "P0": 0.1, "P1": 0.5},
     "P0": {"H0": 0.3, "H1": 0.3, "P0": 0.2, "P1": 0.2},
     "P1": {"H0": 0.3, "H1": 0.3, "P0": 0.2, "P1": 0.2}}


class BeliefSpace:
    def __init__(self, state):
        # bug fix: __init__ may not return a value; just initialize the list
        self.states = []


class State:
    def __init__(self, player, cards1, cards2, table, deck, parent):
        self.parent = parent
        self.depth = 0
        self.value = 0
        self.player = player  # player that has the turn, either 1 or 2 (int)
        self.cards1 = cards1  # list of cards in player one's hand (Card list): 2 cards created with the Card object
        self.cards2 = cards2  # list of cards in the AI's hand (Card list): 2 cards created with the Card object
        self.table = table    # list of card numbers on the table (int list)
                              # /!\ initial table should contain a 0 for the Play action to work
        self.deck = deck      # number of cards left in the deck (int)
        tableCards = []
        for nb in table:
            tableCards.append(Card(nb))
        # list of all the cards that are out of the deck (list)
        self.discoveredCards = cards1 + cards2 + tableCards


class Card():
    # -----------------------------
    # ----- Initialization functions
    # -----------------------------
    def __init__(self, number, color=None):
        # bug fix: every call site constructs cards as Card(number),
        # so the color is optional here
        self.color = color
        self.number = number
        self.colorHinted = False
        self.numberHinted = False
        self.known = False  # flipped to True by Actions.Hint


class Actions:
    def Hint(initialstate, side):
        # side is an integer, 0 = left, 1 = right
        newstate = copy.deepcopy(initialstate)
        newstate.parent = initialstate
        newstate.depth = initialstate.depth + 1
        if initialstate.player == 1:
            newstate.cards2[side].known = True
            newstate.player = 2
        elif initialstate.player == 2:
            newstate.cards1[side].known = True
            newstate.player = 1
        return [newstate]

    def Play(initialstate, side):
        # side is an integer, 0 = left, 1 = right
        newstate = copy.deepcopy(initialstate)
        newstate.parent = initialstate
        newstate.depth = initialstate.depth + 1
        # ------------------------
        # if no cards left in deck
        if initialstate.deck == 0:
            if initialstate.player == 1:
                playedcard = initialstate.cards1[side]
                if playedcard.number == (max(initialstate.table) + 1):  # check if it is a correct card
                    newstate.table.append(playedcard.number)  # it is added to the table of the new state
                newstate.cards1[side] = None  # remove card from hand
                newstate.player = 2  # change player turn
            elif initialstate.player == 2:
                playedcard = initialstate.cards2[side]
                if playedcard.number == (max(initialstate.table) + 1):  # if it is a correct card
                    newstate.table.append(playedcard.number)  # it is added to the table of the new state
                newstate.cards2[side] = None
                newstate.player = 1
            return [newstate]
        # ----------------------------
        # if there are cards left in the deck, we need to make a new state for
        # each possibility of a new card; the function returns a list of new states
        else:
            # initializing the list of new states
            nbCardsLeft = initialstate.deck
            newstates = [None] * nbCardsLeft
            for i in range(nbCardsLeft):
                newstates[i] = copy.deepcopy(newstate)
            # making a list of all the possible numbers left
            discoveredNumbers = []
            for card in initialstate.discoveredCards:
                discoveredNumbers.append(card.number)
            allNumbers = [1, 2, 3, 4, 5]
            numbersLeft = [x for x in allNumbers if x not in discoveredNumbers]
            # updating all the new states with all possible new cards,
            # then removing the played card (add its number to table if correct)
            if initialstate.player == 1:
                playedcard = initialstate.cards1[side]
                for i in range(nbCardsLeft):
                    newstates[i].cards1[side] = Card(numbersLeft[i])  # old card that was played gets replaced by new card
                    if playedcard.number == (max(initialstate.table) + 1):  # if it is a correct card
                        newstates[i].table.append(playedcard.number)  # it is added to the table of the new state
                    newstates[i].player = 2
            elif initialstate.player == 2:
                playedcard = initialstate.cards2[side]
                for i in range(nbCardsLeft):
                    newstates[i].cards2[side] = Card(numbersLeft[i])  # old card that was played gets replaced by new card
                    if playedcard.number == (max(initialstate.table) + 1):  # if it is a correct card
                        newstates[i].table.append(playedcard.number)  # bug fix: was newstate.table, which mutated the template state
                    newstates[i].player = 1
            for state in newstates:
                state.deck = initialstate.deck - 1
            return newstates


class Solver:
    def __init__(self, max_depth, hand_size, actions):
        self.max_depth = max_depth
        self.hand_size = hand_size
        self.actions = actions

    def utility(self, state):
        return 10 * len(state.table)

    """
    def forward(self, beliefspace, actions):
        visited = []
        queue = []
        terminal_nodes = []
        for state in beliefspace:
            visited.append(state)
            queue.append(state)
        while queue:
            s = queue.pop(0)
            if s.depth < self.max_depth:
                for action in actions:
                    for side in [0, 1]:
                        children = action(s, side)
                        for child in children:
                            queue.append(child)
                            visited.append(child)
                            print(child.depth)
                            if child.depth == self.max_depth:
                                terminal_nodes.append(child)
        return terminal_nodes
    """

    def evaluate(self, beliefspace, actions):
        results = []
        for state in beliefspace:
            children = [(self.weighted_value(action(state, pos)[0], a_id + str(pos)), a_id, pos)
                        for (action, a_id) in actions
                        for pos in np.arange(self.hand_size)]
            print(children)
            results.append(sorted(children, key=lambda tup: tup[0])[-1])
        return results

    def max_value(self, state):
        global w
        if state.depth >= self.max_depth:
            return self.utility(state)
        v = -np.inf
        # use the solver's own action set (was a bare global)
        for (a, a_id) in self.actions:
            for s in range(2):
                # bug fix: np.amax(v, ...) treats its second argument as an
                # axis; Python's built-in max is what was intended
                v = max(v, self.weighted_value(a(state, s)[0], a_id + str(s)))
        return v

    def weighted_value(self, state, act_id):
        global w
        weights = w[act_id]
        if state.depth >= self.max_depth:
            return self.utility(state)
        v = 0
        for (a, a_id) in self.actions:
            for s in range(2):
                v = v + weights[a_id + str(s)] * self.max_value(a(state, s)[0])
        return v


if __name__ == "__main__":
    c1 = Card(1)
    c2 = Card(2)
    c3 = Card(3)
    c4 = Card(4)
    c5 = Card(5)
    cards1 = [c5, c1]
    cards2 = [c2, c4]
    table = [0]
    deck = 1
    parent = None
    player = 2
    state = State(player, cards1, cards2, table, deck, parent)
    initial_belief_states = [state]
    actions = [(Actions.Play, "P"), (Actions.Hint, "H")]
    # bug fix: Solver requires a max depth, a hand size and the action set,
    # and the active entry point is evaluate() (forward2 only survives in the
    # commented-out history above)
    solver = Solver(2, 2, actions)
    terminal = solver.evaluate(initial_belief_states, actions)

    """
    print("Some tests to see the Actions functioning:")
    print("0.Initial state with cards: player1: (1,2), player2: (3,4)")
    state1 = State(1, [Card(1), Card(2)], [Card(4), Card(5)], [0], 1, None)
    print("")
    print("1.Making a Hint of the 2nd player right card:")
    state2 = Actions.Hint(state1, 1)
    # check that the card is now "known" and that the player becomes "2"
    print("Is the card known? {}. What player turn is it after the action? {}.".format(state2[0].cards2[1].known, state2[0].player))
    print("")
    print("2. Playing the correct card from player 1's left (the 1):")
    state2b = Actions.Play(state1, 0)
    print("New size of deck: {}. New card on the left for player 1: {}. New table: {}. Amount of new states created: {}".format(state2b[0].deck, state2b[0].cards1[0].number, state2b[0].table, len(state2b)))
    print(state2[0].depth)
    state3 = Actions.Hint(state2[0], 1)
    print(state3[0].depth)
    state4 = Actions.Hint(state3[0], 1)
    print(state4[0].depth)
    """
3.3125
3
(19.12.06) Culminating/sprites.py
bly852/ICS3U1
0
11553
# course: ICS3U1 2019 # exercise: Culminating Activity # date: 2019-12-06 # student number: 340926187 # name: <NAME> # description: Two players (Mr Chun & Mr Pileggi) running around the school # collecting food for the food drive. # sprite classes import pygame import random import math import os from settings import * class Player(pygame.sprite.Sprite): """ player class that contains all data and functions related to the player """ def __init__(self, game, x, y, playerNum): """ initalizes a player sprite when an instance is created in the game parameter, at the x and y paramters, and with the player number """ self.playerNum = playerNum self.groups = game.all_sprites, game.players pygame.sprite.Sprite.__init__(self, self.groups) self.game = game # image selection for each player if self.playerNum == 1: self.image = pygame.transform.rotate(self.game.player1_image, 90) else: self.image = pygame.transform.rotate(self.game.player2_image, 90) self.rect = self.image.get_rect() # setting the players base movement velocity self.velX, self.velY = 0, 0 # setting the players position on the grid self.x = x * tileSize - tileSize self.y = y * tileSize - tileSize # players starting score self.score = 0 # if joysticks are connected, enable joystick controls for the player self.joystick_count = pygame.joystick.get_count() if self.joystick_count > 0: self.joystick_enabled = True else: self.joystick_enabled = False def get_keys(self): """ checks for all keys pressed and changes the players velocity on that axis to the player speed varaiable """ self.velX, self.velY = 0, 0 keys = pygame.key.get_pressed() # player 1 controls if self.playerNum == 1: if keys[pygame.K_a]: self.velX = -player_speed if keys[pygame.K_d]: self.velX = player_speed if keys[pygame.K_w]: self.velY = -player_speed if keys[pygame.K_s]: self.velY = player_speed # player 2 controls else: if keys[pygame.K_LEFT]: self.velX = -player_speed if keys[pygame.K_RIGHT]: self.velX = player_speed if keys[pygame.K_UP]: self.velY = -player_speed if keys[pygame.K_DOWN]: self.velY = player_speed # if moving diagonally reduce the speed if self.velX > 0 and self.velY > 0: self.velX = player_speed * 0.701 self.velY = player_speed * 0.701 elif self.velX < 0 and self.velY < 0: self.velX = player_speed * -0.701 self.velY = player_speed * -0.701 def get_joystick_axis(self): """ changes the velocity of the character in the x and y based on joystick input """ # joystick controls for two seperate controllers if self.joystick_count == 2: # joystick control for player 1 if self.playerNum == 1: # joystick initialization joystick = pygame.joystick.Joystick(1) joystick.init() # different joystick settings for Xbox controllers if joystick.get_name() == 'Xbox Wireless Controller' or 'Controller (Xbox One For Windows)': # checks for axis movement and changes velX and velY if round(joystick.get_axis(0)) != 0 or round(joystick.get_axis(1)) != 0: self.velX += joystick.get_axis(0) * player_speed self.velY += joystick.get_axis(1) * player_speed else: if round(joystick.get_axis(1)) != 0 or round(joystick.get_axis(0)) != 0: self.velX += joystick.get_axis(1) * player_speed self.velY -= joystick.get_axis(0) * player_speed # joystick control for player 2 elif self.playerNum == 2: # joystick initialization joystick = pygame.joystick.Joystick(0) joystick.init() # Different joystick settings for Xbox controllers if joystick.get_name() == 'Xbox Wireless Controller' or 'Controller (Xbox One For Windows)': # checks for axis movement and changes velX and velY if 
round(joystick.get_axis(0)) != 0 or round(joystick.get_axis(1)) != 0: self.velX += joystick.get_axis(0) * player_speed self.velY += joystick.get_axis(1) * player_speed else: if round(joystick.get_axis(1)) != 0 or round(joystick.get_axis(0)) != 0: self.velX += joystick.get_axis(1) * player_speed self.velY -= joystick.get_axis(0) * player_speed # joystick controls for a single controller elif self.joystick_count == 1: # joystick control for player 1 if self.playerNum == 1: # joystick initialization joystick = pygame.joystick.Joystick(0) joystick.init() # different joystick settings for Xbox controllers if joystick.get_name() == 'Xbox Wireless Controller' or 'Controller (Xbox One For Windows)': # checks for axis movement and changes velX and velY if round(joystick.get_axis(0)) != 0 or round(joystick.get_axis(1)) != 0: self.velX += joystick.get_axis(0) * player_speed self.velY += joystick.get_axis(1) * player_speed else: if round(joystick.get_axis(1)) != 0 or round(joystick.get_axis(0)) != 0: self.velX += joystick.get_axis(1) * player_speed self.velY -= joystick.get_axis(0) * player_speed # joystick control for player 2 elif self.playerNum == 2: # joystick initialization joystick = pygame.joystick.Joystick(0) joystick.init() # different joystick settings for Xbox controllers if joystick.get_name() == 'Xbox Wireless Controller' or 'Controller (Xbox One For Windows)': # checks for axis movement and changes velX and velY if round(joystick.get_axis(4)) != 0 or round(joystick.get_axis(3)) != 0: self.velX += joystick.get_axis(4) * player_speed self.velY += joystick.get_axis(3) * player_speed else: if round(joystick.get_axis(1)) != 0 or round(joystick.get_axis(0)) != 0: self.velX += joystick.get_axis(2) * player_speed self.velY -= joystick.get_axis(3) * player_speed def direction(self): """ rotates the player sprite based on the current direction and new direction """ # player 1 rotation if self.playerNum == 1: if self.velX > 100: if self.velY < -100: self.image = pygame.transform.rotate(self.game.player1_image, 45) elif self.velY > 100: self.image = pygame.transform.rotate(self.game.player1_image, -45) else: self.image = pygame.transform.rotate(self.game.player1_image, 0) elif self.velX < -100: if self.velY < -100: self.image = pygame.transform.rotate(self.game.player1_image, 135) elif self.velY > 100: self.image = pygame.transform.rotate(self.game.player1_image, -135) else: self.image = pygame.transform.rotate(self.game.player1_image, 180) else: if self.velY < -100: self.image = pygame.transform.rotate(self.game.player1_image, 90) elif self.velY > 100: self.image = pygame.transform.rotate(self.game.player1_image, -90) # player 2 rotation else: if self.velX > 100: if self.velY < -100: self.image = pygame.transform.rotate(self.game.player2_image, 45) elif self.velY > 100: self.image = pygame.transform.rotate(self.game.player2_image, -45) else: self.image = pygame.transform.rotate(self.game.player2_image, 0) elif self.velX < -100: if self.velY < -100: self.image = pygame.transform.rotate(self.game.player2_image, 135) elif self.velY > 100: self.image = pygame.transform.rotate(self.game.player2_image, -135) else: self.image = pygame.transform.rotate(self.game.player2_image, 180) else: if self.velY < -100: self.image = pygame.transform.rotate(self.game.player2_image, 90) elif self.velY > 100: self.image = pygame.transform.rotate(self.game.player2_image, -90) def wall_collision(self, axis): """ checks for player collision with the all wall sprites on the axis given and prevents player movement onto it """ 
if axis == 'x': collides = pygame.sprite.spritecollide(self, self.game.walls, False) if collides: if self.velX > 0: self.x = collides[0].rect.left - self.rect.width if self.velX < 0: self.x = collides[0].rect.right self.velX = 0 self.rect.x = self.x if axis == 'y': collides = pygame.sprite.spritecollide(self, self.game.walls, False) if collides: if self.velY > 0: self.y = collides[0].rect.top - self.rect.height if self.velY < 0: self.y = collides[0].rect.bottom self.velY = 0 self.rect.y = self.y def player_collision(self, axis): """ checks for player collision with the all wall sprites on the axis given and prevents player movement onto it """ # checks for player 1 collision to player 2 if self.playerNum == 1: if axis == 'x': if self.rect.colliderect(self.game.player2): if self.velX > 0: self.x = self.game.player2.rect.left - self.rect.width if self.velX < 0: self.x = self.game.player2.rect.right self.velX = 0 self.rect.x = self.x if axis == 'y': if self.rect.colliderect(self.game.player2): if self.velY > 0: self.y = self.game.player2.rect.top - self.rect.height if self.velY < 0: self.y = self.game.player2.rect.bottom self.velY = 0 self.rect.y = self.y # checks for player 2 collision to player 1 else: if axis == 'x': if self.rect.colliderect(self.game.player1): if self.velX > 0: self.x = self.game.player1.rect.left - self.rect.width if self.velX < 0: self.x = self.game.player1.rect.right self.velX = 0 self.rect.x = self.x if axis == 'y': if self.rect.colliderect(self.game.player1): if self.velY > 0: self.y = self.game.player1.rect.top - self.rect.height if self.velY < 0: self.y = self.game.player1.rect.bottom self.velY = 0 self.rect.y = self.y def food_collision(self): """ checks for player collision with all food sprites killing any sprites it comes collides with and adding 1 to the players score value """ collides = pygame.sprite.spritecollide(self, self.game.food, True) if collides: self.score += 1 def update(self): """ updates the players position """ self.get_keys() if self.joystick_enabled == True: self.get_joystick_axis() self.direction() self.x += self.velX * self.game.dt self.y += self.velY * self.game.dt self.rect.x = self.x self.wall_collision('x') self.player_collision('x') self.rect.y = self.y self.wall_collision('y') self.player_collision('y') self.food_collision() class Wall(pygame.sprite.Sprite): """ class to contain all the data for wall sprites """ def __init__(self, game, x, y): """ initalizes a wall sprite when an instance is create in the game parameter, at the x and y paramters """ self.groups = game.all_sprites, game.walls pygame.sprite.Sprite.__init__(self, self.groups) self.game = game self.image = game.wall_image self.rect = self.image.get_rect() self.x = x self.y = y self.rect.x = x * tileSize self.rect.y = y * tileSize class Floor(pygame.sprite.Sprite): """ class to contain all the data for floor sprites """ def __init__(self, game, x, y): """ initalizes a floor sprite when an instance is created in the game parameter, at the x and y paramters """ self.groups = game.all_sprites, game.floor pygame.sprite.Sprite.__init__(self, self.groups) self.game = game self.image = game.floor_image self.rect = self.image.get_rect() self.x = x self.y = y self.rect.x = x * tileSize self.rect.y = y * tileSize class Food(pygame.sprite.Sprite): """ class to contain all the data for food sprites """ def __init__(self, game, x, y): """ initalizes a food sprite when an instance is created in the game parameter, at the x and y paramters """ self.groups = game.all_sprites, game.food 
pygame.sprite.Sprite.__init__(self, self.groups) self.game = game # picks random image for the sprite self.image = pygame.image.load(os.path.join(food_folder, (random.choice(food_images)))).convert_alpha() self.rect = self.image.get_rect() self.x = x self.y = y self.rect.x = x * tileSize self.rect.y = y * tileSize # checks if the sprite is allowed to spawn in the x and y self.spawnable = False collided = pygame.sprite.spritecollide(self, self.game.floor, False) for sprite in collided: if self.x == sprite.x and self.y == sprite.y: self.spawnable = True if self.spawnable == False: self.kill()
3.640625
4
app/schemas/usage_logs.py
wiki-yu/fastapi-algorithm-library
0
11554
from typing import Optional, List

from pydantic import BaseModel


class UsageLog(BaseModel):
    api_key: str
    is_active: bool
    never_expire: bool
    expiration_date: str
    latest_query_date: Optional[str]
    total_queries: int


class UsageLogs(BaseModel):
    logs: List[UsageLog]
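# A minimal construction sketch (all field values are made up for
# illustration; pydantic validates the types on construction):
# log = UsageLog(api_key="abc123", is_active=True, never_expire=False,
#                expiration_date="2030-01-01", latest_query_date=None,
#                total_queries=0)
# UsageLogs(logs=[log])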
2.21875
2
dstf/core.py
anthonydugois/dstf
0
11555
from abc import ABCMeta, abstractmethod from collections import OrderedDict from math import inf from typing import Iterator, Any, List, Dict, Type, Optional EPSILON = 1e-4 class Error(Exception): pass class ConstraintError(Error): pass class Constraint(metaclass=ABCMeta): @abstractmethod def isvalid(self, schedule: "Schedule", chunk: "Chunk") -> bool: pass def geterror(self, schedule: "Schedule", chunk: "Chunk") -> str: return "'{}' constraint is not met".format(type(self).__name__) class Property(metaclass=ABCMeta): @abstractmethod def get(self, schedule: "Schedule") -> Any: pass class Operator(metaclass=ABCMeta): @abstractmethod def apply(self, schedule: "Schedule") -> Any: pass class Task: def __init__(self, name: str): self.name = name self.constraints = OrderedDict() def __contains__(self, constraint_cls: Type["Constraint"]) -> bool: return constraint_cls in self.constraints def __iter__(self) -> Iterator[Type["Constraint"]]: return iter(self.constraints) def __getitem__(self, constraint_cls: Type["Constraint"]) -> "Constraint": return self.constraints[constraint_cls] def __getattr__(self, attr: str): for ctr in self.constraints.values(): if attr in ctr.__dict__: return ctr.__dict__[attr] raise AttributeError("'{}' task has no attribute '{}'".format(self.name, attr)) def set(self, constraint: "Constraint") -> "Task": self.constraints[type(constraint)] = constraint return self class Chunk: def __init__(self, task: "Task", start_time: float, proctimes: Dict[Any, float]): self.task = task self.start_time = start_time self.proctimes = proctimes def completion_time(self, node: Any) -> float: if node in self.proctimes: return self.start_time + self.proctimes[node] else: return inf def isvalid(self, schedule: "Schedule") -> bool: for ctr in self.task.constraints.values(): if not ctr.isvalid(schedule, self): return False return True def append_to(self, schedule: "Schedule"): for ctr in self.task.constraints.values(): if not ctr.isvalid(schedule, self): raise ConstraintError(ctr.geterror(schedule, self)) if self.task in schedule.taskmap: schedule.taskmap[self.task].append(self) else: schedule.taskmap[self.task] = [self] for node in self.proctimes: if node in schedule.nodemap: schedule.nodemap[node].add(self) else: schedule.nodemap[node] = ChunkTree(node).add(self) def remove_from(self, schedule: "Schedule"): schedule.taskmap[self.task].remove(self) for node in self.proctimes: schedule.nodemap[node].remove(self) class ChunkNode: def __init__(self, chunk: "Chunk"): self.chunk = chunk self.height = 1 self.hi = -inf self.left = None self.right = None class ChunkTree: def __init__(self, node: Any): self.node = node self.root = None def __iter__(self) -> Optional[Iterator["ChunkNode"]]: return self._iter_from(self.root) def _iter_from(self, root: Optional["ChunkNode"]) -> Optional[Iterator["ChunkNode"]]: if root is None: return None else: yield from self._iter_from(root.left) yield root yield from self._iter_from(root.right) def at(self, time: float) -> List["ChunkNode"]: nodes = [] self._at_from(self.root, time, nodes) return nodes def _at_from(self, root: Optional["ChunkNode"], time: float, nodes: List["ChunkNode"]): if root is not None: if root.left is not None and time < root.left.hi: self._at_from(root.left, time, nodes) if root.chunk.start_time <= time < root.chunk.completion_time(self.node): nodes.append(root) self._at_from(root.right, time, nodes) def over(self, lo: float, hi: float) -> List["ChunkNode"]: nodes = [] self._over_from(self.root, lo, hi, nodes) return nodes def _over_from(self, 
root: Optional["ChunkNode"], lo: float, hi: float, nodes: List["ChunkNode"]): if root is not None: if root.left is not None and lo < root.left.hi: self._over_from(root.left, lo, hi, nodes) if lo < root.chunk.completion_time(self.node) and root.chunk.start_time < hi: nodes.append(root) self._over_from(root.right, lo, hi, nodes) def add(self, chunk: "Chunk") -> "ChunkTree": self.root = self._add_from(self.root, chunk) return self def _add_from(self, root: Optional["ChunkNode"], chunk: "Chunk") -> "ChunkNode": if root is None: treenode = ChunkNode(chunk) treenode.hi = chunk.completion_time(self.node) return treenode else: if chunk.start_time < root.chunk.start_time: root.left = self._add_from(root.left, chunk) else: root.right = self._add_from(root.right, chunk) root.height = 1 + max(self._height(root.left), self._height(root.right)) root.hi = max(self._hi(root), chunk.completion_time(self.node)) return self._rotate(root) def remove(self, chunk: "Chunk") -> "ChunkTree": self.root = self._remove_from(self.root, chunk) return self def _remove_from(self, root: Optional["ChunkNode"], chunk: "Chunk") -> Optional["ChunkNode"]: if root is None: return None else: if chunk.start_time < root.chunk.start_time: root.left = self._remove_from(root.left, chunk) elif chunk.start_time > root.chunk.start_time: root.right = self._remove_from(root.right, chunk) else: if root.left is None: return root.right elif root.right is None: return root.left else: successor = self._min_from(root.right) root.chunk = successor.chunk root.right = self._remove_from(root.right, successor.chunk) root.height = 1 + max(self._height(root.left), self._height(root.right)) root.hi = max(root.chunk.completion_time(self.node), self._hi(root.left), self._hi(root.right)) return self._rotate(root) def _rotate(self, root: "ChunkNode") -> "ChunkNode": balance = self._balance(root) if balance > 1 and self._balance(root.left) >= 0: return self._rotate_right(root) elif balance > 1 and self._balance(root.left) < 0: root.left = self._rotate_left(root.left) return self._rotate_right(root) elif balance < -1 and self._balance(root.right) <= 0: return self._rotate_left(root) elif balance < -1 and self._balance(root.right) > 0: root.right = self._rotate_right(root.right) return self._rotate_left(root) else: return root def _rotate_left(self, root: "ChunkNode") -> "ChunkNode": pivot = root.right child = pivot.left pivot.left = root root.right = child root.height = 1 + max(self._height(root.left), self._height(root.right)) root.hi = max(root.chunk.completion_time(self.node), self._hi(root.left), self._hi(root.right)) pivot.height = 1 + max(self._height(pivot.left), self._height(pivot.right)) pivot.hi = max(pivot.chunk.completion_time(self.node), self._hi(pivot.left), self._hi(pivot.right)) return pivot def _rotate_right(self, root: "ChunkNode") -> "ChunkNode": pivot = root.left child = pivot.right pivot.right = root root.left = child root.height = 1 + max(self._height(root.left), self._height(root.right)) root.hi = max(root.chunk.completion_time(self.node), self._hi(root.left), self._hi(root.right)) pivot.height = 1 + max(self._height(pivot.left), self._height(pivot.right)) pivot.hi = max(pivot.chunk.completion_time(self.node), self._hi(pivot.left), self._hi(pivot.right)) return pivot def _balance(self, root: "ChunkNode") -> int: if root is None: return 0 else: return self._height(root.left) - self._height(root.right) def _height(self, root: "ChunkNode") -> int: if root is None: return 0 else: return root.height def _hi(self, root: 
Optional["ChunkNode"]) -> float: if root is None: return -inf else: return root.hi def min(self) -> Optional["ChunkNode"]: return self._min_from(self.root) def _min_from(self, root: "ChunkNode") -> Optional["ChunkNode"]: if root is None: return None else: current = root while current.left is not None: current = current.left return current def max(self) -> Optional["ChunkNode"]: return self._max_from(self.root) def _max_from(self, root: "ChunkNode") -> Optional["ChunkNode"]: if root is None: return None else: current = root while current.right is not None: current = current.right return current class Schedule: def __init__(self): self.taskmap = {} self.nodemap = {} def tasks(self) -> Iterator["Task"]: return iter(self.taskmap) def hastask(self, task: "Task") -> bool: return task in self.taskmap def task(self, task: "Task") -> Optional[List["Chunk"]]: if task in self.taskmap: return self.taskmap[task] else: return None def nodes(self) -> Iterator[Any]: return iter(self.nodemap) def hasnode(self, node: Any) -> bool: return node in self.nodemap def node(self, node: Any) -> Optional["ChunkTree"]: if node in self.nodemap: return self.nodemap[node] else: return None # def copy(self): # chunk_map = self.taskmap.copy() # # for tsk in chunk_map: # chunk_map[tsk] = chunk_map[tsk].copy() # # return Schedule(chunk_map) def get(self, prop: "Property") -> Any: return prop.get(self) def apply(self, operator: "Operator") -> Any: return operator.apply(self)
3.015625
3
2018/2018_06a.py
davidxiao93/Advent-of-Code
0
11556
input = """154, 159
172, 84
235, 204
181, 122
161, 337
305, 104
128, 298
176, 328
146, 71
210, 87
341, 195
50, 96
225, 151
86, 171
239, 68
79, 50
191, 284
200, 122
282, 240
224, 282
327, 74
158, 289
331, 244
154, 327
317, 110
272, 179
173, 175
187, 104
44, 194
202, 332
249, 197
244, 225
52, 127
299, 198
123, 198
349, 75
233, 72
284, 130
119, 150
172, 355
147, 314
58, 335
341, 348
236, 115
185, 270
173, 145
46, 288
214, 127
158, 293
237, 311"""

from collections import namedtuple

Point = namedtuple("Point", ["id", "x", "y"])

points = set()
for id, line in enumerate(input.splitlines()):
    words = line.split(",")
    x, y = [int(a) for a in words]
    points.add(Point(id, x, y))

# get bounds
a_point = next(iter(points))
left_bound = a_point.x
right_bound = a_point.x
up_bound = a_point.y
down_bound = a_point.y
for p in points:
    if p.x < left_bound:
        left_bound = p.x
    if p.x > right_bound:
        right_bound = p.x
    if p.y < up_bound:
        up_bound = p.y
    if p.y > down_bound:
        down_bound = p.y

# Find closest points within the bounds
# Anything outside the bounds is uninteresting as it just leads off into infinite space

def distance(p, q):
    return abs(p.x - q.x) + abs(p.y - q.y)

def find_closest(p, points):
    closest_dist = None
    closest = set()
    for q in points:
        dist = distance(p, q)
        if closest_dist == None or dist < closest_dist:
            closest = {q.id}
            closest_dist = dist
        elif dist == closest_dist:
            closest.add(q.id)
    return closest

grid = [
    [0] * (right_bound - left_bound + 1) for i in range(down_bound - up_bound + 1)
]

for y in range(up_bound, down_bound + 1):
    for x in range(left_bound, right_bound + 1):
        closest_points = find_closest(Point(id=None, x=x, y=y), points)
        if len(closest_points) > 1:
            grid[y - up_bound][x - left_bound] = -1
        elif len(closest_points) == 0:
            print("wtf")
            exit(1)
        else:
            grid[y - up_bound][x - left_bound] = closest_points.pop()

# We have our grid, we can remove any point ids that lie on the edge as they
# will continue off to infinity
candidate_ids = {p.id for p in points}
# bug fix: walk the whole border, not just the four corners
for y in range(0, down_bound - up_bound + 1):
    for x in [0, right_bound - left_bound]:
        if grid[y][x] in candidate_ids:
            candidate_ids.remove(grid[y][x])
for y in [0, down_bound - up_bound]:
    for x in range(0, right_bound - left_bound + 1):
        if grid[y][x] in candidate_ids:
            candidate_ids.remove(grid[y][x])

# we have our contenders
# now find which has the largest finite space
ids_to_count = {}
for y in range(0, down_bound - up_bound + 1):
    for x in range(0, right_bound - left_bound + 1):
        if grid[y][x] in candidate_ids:
            if grid[y][x] not in ids_to_count:
                ids_to_count[grid[y][x]] = 0
            ids_to_count[grid[y][x]] += 1

print(max(ids_to_count.values()))
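# A small sanity check of the Manhattan metric used above (verified by hand):
# distance(Point(None, 0, 0), Point(None, 3, 4)) == 3 + 4 == 7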
3.375
3
tests/test_backup.py
KonstantinPankratov/Backupy
1
11557
import os

from Backupy import Backupy


def test_backup():
    backup = Backupy()
    backup.add_directory('./')
    backup.start()
    assert os.path.exists(backup.filename)
    os.remove(backup.filename)
2.34375
2
tweetf0rm/process/crawler_process.py
amaurywalbert/mytweetf0rm
1
11558
#!/usr/bin/python
# -*- coding: utf-8 -*-

#

import logging

logger = logging.getLogger(__name__)

import multiprocessing as mp
import tweetf0rm.handler

from tweetf0rm.redis_helper import CrawlerQueue

#MAX_QUEUE_SIZE = 32767

class CrawlerProcess(mp.Process):

    def __init__(self, node_id, crawler_id, redis_config, handlers):
        super(CrawlerProcess, self).__init__()
        self.node_id = node_id
        self.crawler_id = crawler_id
        self.redis_config = redis_config
        #self.queue = mp.Queue(maxsize=MAX_QUEUE_SIZE)
        self.crawler_queue = CrawlerQueue(node_id, crawler_id, redis_config=redis_config)
        self.crawler_queue.clear()
        #self.lock = mp.Lock()
        self.handlers = handlers
        logger.debug("number of handlers attached: %d" % (len(handlers)))

    def get_crawler_id(self):
        return self.crawler_id

    def enqueue(self, request):
        #self.queue.put(request, block=True)
        self.crawler_queue.put(request)
        return True

    def get_cmd(self):
        #return self.queue.get(block=True)
        return self.crawler_queue.get(block=True)

    def get_queue_size(self):
        # bug fix: the queue size was computed but never returned
        return self.crawler_queue.qsize()

    def run(self):
        pass
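# A minimal wiring sketch (the ids and redis_config are placeholders, and
# CrawlerQueue needs a reachable Redis behind redis_config for this to run):
# p = CrawlerProcess("node-1", "crawler-1", redis_config={}, handlers=[])
# p.enqueue({"cmd": "CRAWL_FRIENDS"})  # hypothetical command dict
# p.start()  # mp.Process entry point; run() is still a no-op stub here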
2.5
2
cloudify_aws/ec2/resources/dhcp.py
marrowne/cloudify-aws-plugin
0
11559
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EC2.DhcpOptions
~~~~~~~~~~~~~~~
AWS EC2 DhcpOptions interface
"""
# Boto
from botocore.exceptions import ClientError

# Cloudify
from cloudify_aws.common import decorators, utils
from cloudify_aws.ec2 import EC2Base
from cloudify_aws.common.constants import EXTERNAL_RESOURCE_ID

RESOURCE_TYPE = 'EC2 Dhcp Options'
DHCPOPTIONS = 'DhcpOptions'
DHCPOPTIONS_ID = 'DhcpOptionsId'
DHCPOPTIONS_IDS = 'DhcpOptionsIds'
VPC_ID = 'VpcId'
VPC_TYPE = 'cloudify.nodes.aws.ec2.Vpc'
VPC_TYPE_DEPRECATED = 'cloudify.aws.nodes.Vpc'


class EC2DHCPOptions(EC2Base):
    """EC2 DhcpOptions interface"""
    def __init__(self, ctx_node, resource_id=None, client=None, logger=None):
        EC2Base.__init__(self, ctx_node, resource_id, client, logger)
        self.type_name = RESOURCE_TYPE

    @property
    def properties(self):
        """Gets the properties of an external resource"""
        params = {DHCPOPTIONS_IDS: [self.resource_id]}
        try:
            resources = \
                self.client.describe_dhcp_options(**params)
        except ClientError:
            pass
        else:
            return resources.get(DHCPOPTIONS)[0] if resources else None

    def create(self, params):
        """Create a new AWS EC2 DhcpOptions."""
        return self.make_client_call('create_dhcp_options', params)

    def delete(self, params=None):
        """Deletes an existing AWS EC2 DhcpOptions."""
        self.logger.debug('Deleting %s with parameters: %s'
                          % (self.type_name, params))
        res = self.client.delete_dhcp_options(**params)
        self.logger.debug('Response: %s' % res)
        return res

    def attach(self, params):
        """Attach an AWS EC2 DhcpOptions to a VPC."""
        self.logger.debug('Attaching %s with: %s'
                          % (self.type_name, params.get(VPC_ID, None)))
        res = self.client.associate_dhcp_options(**params)
        self.logger.debug('Response: %s' % res)
        return res

    def detach(self, params):
        """Detach an AWS EC2 DhcpOptions from a VPC."""
        self.logger.debug('Detaching %s from: %s'
                          % (self.type_name, params.get(VPC_ID, None)))
        self.logger.debug('Attaching default %s' % self.type_name)
        res = self.client.associate_dhcp_options(**params)
        self.logger.debug('Response: %s' % res)
        return res


@decorators.aws_resource(EC2DHCPOptions, resource_type=RESOURCE_TYPE)
def prepare(ctx, resource_config, **_):
    """Prepares an AWS EC2 DhcpOptions"""
    # Save the parameters
    ctx.instance.runtime_properties['resource_config'] = resource_config


@decorators.aws_resource(EC2DHCPOptions, RESOURCE_TYPE)
def create(ctx, iface, resource_config, **_):
    """Creates an AWS EC2 DhcpOptions"""
    # Create a copy of the resource config for clean manipulation.
    params = \
        dict() if not resource_config else resource_config.copy()
    # Actually create the resource
    create_response = iface.create(params)[DHCPOPTIONS]
    ctx.instance.runtime_properties['create_response'] = \
        utils.JsonCleanuper(create_response).to_dict()
    dhcp_options_id = create_response.get(DHCPOPTIONS_ID, '')
    iface.update_resource_id(dhcp_options_id)
    utils.update_resource_id(ctx.instance, dhcp_options_id)


@decorators.aws_resource(EC2DHCPOptions, RESOURCE_TYPE,
                         ignore_properties=True)
def delete(ctx, iface, resource_config, **_):
    """Deletes an AWS EC2 DhcpOptions"""
    # Create a copy of the resource config for clean manipulation.
    params = \
        dict() if not resource_config else resource_config.copy()
    dhcp_options_id = params.get(DHCPOPTIONS_ID)
    if not dhcp_options_id:
        params[DHCPOPTIONS_ID] = \
            iface.resource_id or \
            ctx.instance.runtime_properties.get(EXTERNAL_RESOURCE_ID)
    iface.delete(params)


@decorators.aws_resource(EC2DHCPOptions, RESOURCE_TYPE)
def attach(ctx, iface, resource_config, **_):
    """Attaches an AWS EC2 DhcpOptions to a VPC"""
    params = dict() if not resource_config else resource_config.copy()
    dhcp_options_id = params.get(DHCPOPTIONS_ID)
    if not dhcp_options_id:
        dhcp_options_id = iface.resource_id
    params.update({DHCPOPTIONS_ID: dhcp_options_id})
    params.pop('DhcpConfigurations')
    vpc_id = params.get(VPC_ID)
    if not vpc_id:
        targ = \
            utils.find_rel_by_node_type(ctx.instance, VPC_TYPE) or \
            utils.find_rel_by_node_type(ctx.instance, VPC_TYPE_DEPRECATED)
        # Attempt to use the VPC ID from parameters.
        # Fallback to connected VPC.
        params[VPC_ID] = \
            vpc_id or \
            targ.target.instance.runtime_properties.get(EXTERNAL_RESOURCE_ID)
    # bug fix: store the resolved ID (params[VPC_ID]); the local vpc_id is
    # None whenever the ID came from a connected VPC relationship
    ctx.instance.runtime_properties['vpc_id'] = params[VPC_ID]
    # Actually attach the resources
    iface.attach(params)


@decorators.aws_resource(EC2DHCPOptions, RESOURCE_TYPE,
                         ignore_properties=True)
def detach(ctx, iface, resource_config, **_):
    """Detach an AWS EC2 DhcpOptions from a VPC"""
    params = dict() if not resource_config else resource_config.copy()
    params.update({DHCPOPTIONS_ID: 'default'})
    vpc_id = params.get(VPC_ID) or ctx.instance.runtime_properties['vpc_id']
    if not vpc_id:
        targ = \
            utils.find_rel_by_node_type(ctx.instance, VPC_TYPE) or \
            utils.find_rel_by_node_type(ctx.instance, VPC_TYPE_DEPRECATED)
        # Attempt to use the VPC ID from parameters.
        # Fallback to connected VPC.
        params[VPC_ID] = \
            vpc_id or \
            targ.target.instance.runtime_properties.get(EXTERNAL_RESOURCE_ID)
    else:
        params.update({VPC_ID: vpc_id})
    iface.detach(params)
1.953125
2
tests/testing_support/sample_applications.py
douglasfarinelli/newrelic-python-agent
1
11560
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    from urllib2 import urlopen   # Py2.X
except ImportError:
    from urllib.request import urlopen   # Py3.X

import sqlite3 as db

from newrelic.api.time_trace import record_exception
from newrelic.api.transaction import (add_custom_parameter,
        get_browser_timing_header, get_browser_timing_footer,
        record_custom_event)
from newrelic.api.wsgi_application import wsgi_application

_custom_parameters = {
        'user': 'user-name',
        'account': 'account-name',
        'product': 'product-name',
        'bytes': b'bytes-value',
        'string': 'string-value',
        'unicode': u'unicode-value',
        'integer': 1,
        'float': 1.0,
        'invalid-utf8': b'\xe2',
        'multibyte-utf8': b'\xe2\x88\x9a',
        'multibyte-unicode': b'\xe2\x88\x9a'.decode('utf-8'),
        'list': [],
        'tuple': (),
        'dict': {},
}

_err_param = {'err-param': 'value'}


def user_attributes_added():
    """Expected values when the custom parameters in this file are added as
    user attributes
    """
    user_attributes = _custom_parameters.copy()
    user_attributes['list'] = '[]'
    user_attributes['tuple'] = '()'
    user_attributes['dict'] = '{}'
    return user_attributes


def error_user_params_added():
    return _err_param.copy()


@wsgi_application()
def fully_featured_app(environ, start_response):
    status = '200 OK'

    path = environ.get('PATH_INFO')
    use_user_attrs = environ.get('record_attributes', 'TRUE') == 'TRUE'

    if use_user_attrs:
        for attr, val in _custom_parameters.items():
            add_custom_parameter(attr, val)

    if 'db' in environ and int(environ['db']) > 0:
        connection = db.connect(":memory:")
        for i in range(int(environ['db']) - 1):
            connection.execute("create table test_db%d (a, b, c)" % i)

    if 'external' in environ:
        for i in range(int(environ['external'])):
            r = urlopen('http://www.python.org')
            r.read(10)

    if 'err_message' in environ:
        n_errors = int(environ.get('n_errors', 1))
        for i in range(n_errors):
            try:
                # append number to stats engine to get unique errors, so they
                # don't immediately get filtered out.
                raise ValueError(environ['err_message'] + str(i))
            except ValueError:
                if use_user_attrs:
                    record_exception(params=_err_param)
                else:
                    record_exception()

    text = '<html><head>%s</head><body><p>RESPONSE</p>%s</body></html>'

    output = (text % (get_browser_timing_header(),
            get_browser_timing_footer())).encode('UTF-8')

    response_headers = [('Content-type', 'text/html; charset=utf-8'),
                        ('Content-Length', str(len(output)))]
    start_response(status, response_headers)

    return [output]


@wsgi_application()
def simple_exceptional_app(environ, start_response):
    start_response('500 :(', [])
    raise ValueError('Transaction had bad value')


@wsgi_application()
def simple_app(environ, start_response):
    status = '200 OK'
    start_response(status, response_headers=[])
    return []


@wsgi_application()
def simple_custom_event_app(environ, start_response):
    params = {'snowman': u'\u2603', 'foo': 'bar'}
    record_custom_event('SimpleAppEvent', params)
    start_response(status='200 OK', response_headers=[])
    return []
2.046875
2
spydrnet_tmr/transformation/replication/nmr.py
byuccl/spydrnet-tmr
0
11561
from spydrnet.ir import Port, Instance, InnerPin from spydrnet_tmr.transformation.util import add_suffix_to_name IN = Port.Direction.IN OUT = Port.Direction.OUT INOUT = Port.Direction.INOUT def apply_nmr(ports_and_instances_to_replicate, degree, name_suffix='NMR', rename_original=True): """ Replicate the selected ports and instances to the n-th degree. :param ports_and_instances_to_replicate: :param degree: number of total copies :param name_suffix: string to append to each replicated element (e.g. 'TMR' or 'DWC') :param rename_original: rename orginal domain :type rename_original: bool :return: A map from an original element to its replicas """ nmr_agent = NMR.from_originals_degree_suffix_and_rename(ports_and_instances_to_replicate, degree, name_suffix, rename_original) replicas = nmr_agent.apply() return replicas class NMR: @staticmethod def from_originals_degree_suffix_and_rename(originals, degree, suffix, rename): nmr_agent = NMR(originals, degree, suffix, rename) return nmr_agent def __init__(self, originals, degree, suffix, rename): # Internal state self._applied = False self._wires_to_replicate = None self._additional_ports_to_replicate = None self._wiremap = None self._replicas = dict() # Inputs for original in originals: # if isinstance(original, HRef): # original = original.item if isinstance(original, (Port, Instance)): self._replicas[original] = None self.replication_degree = degree self.name_suffix = suffix self.rename_original = rename def apply(self): #self._validate_inputs() self._identify_additional_wires_and_ports_to_replicate() self._replicate_ports_and_instances() self._replicate_wires() self._connect_wires() return self._replicas def _identify_additional_wires_and_ports_to_replicate(self): src_pins, snk_pins = self._idenfity_src_and_snk_pins_that_will_be_replicated() wires_to_replicate = self.identify_additional_wires_to_replicate(src_pins, snk_pins) ports_to_replicate = self.identify_additional_ports_to_replicate(wires_to_replicate) self._wires_to_replicate = wires_to_replicate self._replicas.update((port, None) for port in ports_to_replicate) @staticmethod def identify_additional_ports_to_replicate(wires_to_replicate): ports_to_replicate = set() inner_pins = set() outer_pins = set() for wire in wires_to_replicate: for pin in wire.pins: if isinstance(pin, InnerPin): inner_pins.add(pin) else: outer_pins.add(pin) for outer_pin in outer_pins: inner_pin = outer_pin.inner_pin if inner_pin in inner_pins: port = inner_pin.port ports_to_replicate.add(port) for pin in port.pins: inner_pins.discard(pin) return ports_to_replicate @staticmethod def identify_additional_wires_to_replicate(src_pins, snk_pins): wires_to_replicate = set() wires_found = set() for src_pin in src_pins: wire = src_pin.wire if not wire or wire in wires_found: continue wires_found.add(wire) search_stack = [(wire, False)] while search_stack: wire, visited = search_stack.pop() if visited: continue search_stack.append((wire, True)) for pin in wire.pins: if pin in snk_pins: for path_member, part_of_path in reversed(search_stack): if part_of_path is True: if path_member not in wires_to_replicate: wires_to_replicate.add(path_member) else: break elif pin not in src_pins: other_wires = pin.get_wires(selection='OUTSIDE' if isinstance(pin, InnerPin) else 'INSIDE') for other_wire in other_wires: if other_wire not in wires_found: wires_found.add(other_wire) search_stack.append((other_wire, False)) return wires_to_replicate def _idenfity_src_and_snk_pins_that_will_be_replicated(self): src_pins = set() snk_pins = set() 
for original in self._replicas.keys(): if isinstance(original, Port): direction = original.direction if direction in {IN, INOUT}: src_pins.update(original.get_pins(selection='INSIDE')) snk_pins.update(original.get_pins(selection='OUTSIDE')) if direction in {OUT, INOUT}: src_pins.update(original.get_pins(selection='OUTSIDE')) snk_pins.update(original.get_pins(selection='INSIDE')) else: reference = original.reference for port in reference.ports: direction = port.direction if direction in {IN, INOUT}: snk_pins.update(map(original.pins.get, port.pins)) if direction in {OUT, INOUT}: src_pins.update(map(original.pins.get, port.pins)) return src_pins, snk_pins def _replicate_ports_and_instances(self): for original in self._replicas.keys(): if isinstance(original, Port): self._replicate_port(original) else: self._replicate_instance(original) self._reorder_ports_for_readability() self._reorder_instances_for_readability() def _replicate_port(self, port): replicas = list() for ii in range(1, self.replication_degree): port_clone = port.clone() add_suffix_to_name(port_clone, self.name_suffix + '_' + str(ii)) replicas.append(port_clone) port.definition.add_port(port_clone) if self.rename_original: add_suffix_to_name(port, self.name_suffix + '_' + '0') self._replicas[port] = replicas def _replicate_instance(self, inst): replicas = list() for ii in range(1, self.replication_degree): inst_clone = inst.clone() add_suffix_to_name(inst_clone, self.name_suffix + '_' + str(ii)) replicas.append(inst_clone) inst.parent.add_child(inst_clone) if self.rename_original: add_suffix_to_name(inst, self.name_suffix + '_' + '0') self._replicas[inst] = replicas def _reorder_ports_for_readability(self): reordered_definitions = set() for original in self._replicas.keys(): if isinstance(original, Port): definition = original.definition if definition not in reordered_definitions: reordered_definitions.add(definition) new_order = list() def_ports = definition.ports def_ports_len = len(def_ports) for def_port in def_ports: new_order.append(def_port) if def_port in self._replicas: new_order += self._replicas[def_port] if len(new_order) == def_ports_len: break definition.ports = new_order def _reorder_instances_for_readability(self): reordered_definitions = set() for original in self._replicas: if isinstance(original, Instance): definition = original.parent if definition not in reordered_definitions: reordered_definitions.add(definition) new_order = list() def_children = definition.children def_children_len = len(def_children) for def_child in def_children: new_order.append(def_child) if def_child in self._replicas: new_order += self._replicas[def_child] if len(new_order) == def_children_len: break definition.children = new_order def _replicate_wires(self): self._wiremap = dict() replicated_cables = set() for wire in self._wires_to_replicate: cable = wire.cable if cable not in replicated_cables: replicated_cables.add(cable) for ii in range(1, self.replication_degree): cable_clone = cable.clone() add_suffix_to_name(cable_clone, self.name_suffix + '_' + str(ii)) for wire_index, cable_wire in enumerate(cable.wires): if cable_wire in self._wires_to_replicate: if cable_wire not in self._wiremap: self._wiremap[cable_wire] = list() self._wiremap[cable_wire].append(cable_clone.wires[wire_index]) cable.definition.add_cable(cable_clone) if self.rename_original: add_suffix_to_name(cable, self.name_suffix + '_' + '0') self._reorder_cables_for_readibility() def _reorder_cables_for_readibility(self): reordered_definitions = set() for wire in 
self._wiremap: definition = wire.cable.definition if definition not in reordered_definitions: reordered_definitions.add(definition) new_order = list() visited_cables = set() def_cables = definition.cables for def_cable in def_cables: if def_cable in visited_cables: continue visited_cables.add(def_cable) new_order.append(def_cable) for wire in def_cable.wires: if wire in self._wiremap: other_cables = list(other_wire.cable for other_wire in self._wiremap[wire]) for other_cable in other_cables: if other_cable not in visited_cables: visited_cables.add(other_cable) new_order.append(other_cable) definition.cables = new_order def _connect_wires(self): self._connect_replicated_wires() self._connect_non_replicated_wires_to_replicated_pins() def _connect_replicated_wires(self): for wire, other_wires in self._wiremap.items(): for pin in wire.pins: if isinstance(pin, InnerPin): port = pin.port if port in self._replicas: other_ports = self._replicas[port] pin_index = port.pins.index(pin) for ii in range(self.replication_degree - 1): other_wires[ii].connect_pin(other_ports[ii].pins[pin_index]) else: inner_pin = pin.inner_pin instance = pin.instance if instance in self._replicas: other_instances = self._replicas[instance] for ii in range(self.replication_degree - 1): other_wires[ii].connect_pin(other_instances[ii].pins[inner_pin]) else: # TODO: if move this outside of the if does it do what we would expect? port = inner_pin.port if port in self._replicas: other_ports = self._replicas[port] pin_index = port.pins.index(inner_pin) for ii in range(self.replication_degree - 1): other_wires[ii].connect_pin(instance.pins[other_ports[ii].pins[pin_index]]) def _connect_non_replicated_wires_to_replicated_pins(self): pinmap = dict() for original in self._replicas: if isinstance(original, Instance): inst = original other_instances = self._replicas[inst] for pin in inst.pins: if pin.inner_pin.port.direction in {IN, INOUT}: wire = pin.wire if wire and wire not in self._wiremap: inner_pin = pin.inner_pin pinmap[pin] = list() for ii in range(self.replication_degree - 1): other_pin = other_instances[ii].pins[inner_pin] pinmap[pin].append(other_pin) wire.connect_pin(other_pin) elif isinstance(original, Port): port = original other_ports = self._replicas[port] for pin in port.pins: if port.direction in {OUT, INOUT}: wire = pin.wire if wire and wire not in self._wiremap: pin_index = pin.port.pins.index(pin) pinmap[pin] = list() for ii in range(self.replication_degree - 1): other_pin = other_ports[ii].pins[pin_index] pinmap[pin].append(other_pin) wire.connect_pin(other_pin) self._reorder_pins_for_readibility(pinmap) @staticmethod def _reorder_pins_for_readibility(pinmap): reordered_wires = set() for pin in pinmap: wire = pin.wire if wire not in reordered_wires: reordered_wires.add(wire) new_order = list() wire_pins = wire.pins wire_pins_len = len(wire_pins) for wire_pin in wire_pins: new_order.append(wire_pin) if wire_pin in pinmap: new_order += pinmap[wire_pin] if len(new_order) == wire_pins_len: break wire.pins = new_order
2.703125
3
pyspark/example/spark_core/4.7_spark_prog.py
chiliangpi/hellobi
53
11562
<reponame>chiliangpi/hellobi
import os
import numpy as np
import sys
import logging

LOG_PATH = os.environ['log']
spark_home = os.environ['SPARK_HOME']
sys.path.insert(0, os.path.join(spark_home, 'python'))
sys.path.insert(0, os.path.join(spark_home, 'python/lib/py4j-0.10.4-src.zip'))

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("test") \
    .getOrCreate()

logger = logging.getLogger(__name__)
logger.addHandler(logging.FileHandler(LOG_PATH))


def main(*args):
    top = int(args[0][0])
    data = spark.read.csv("hdfs:///tmp/ratings.csv", sep=',', header=True)
    result = (data
              .groupBy("movieid")
              .agg({'rating': 'mean'})
              .withColumnRenamed("avg(rating)", "avg_ratings")
              .dropna()
              .orderBy(['avg_ratings'], ascending=[0])
              .limit(top))
    logger.info("result: {}".format(result.toPandas()))
    # spark.stop()


if __name__ == '__main__':
    logging.basicConfig(format='[%(levelname)s] %(asctime)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=logging.INFO)
    main(sys.argv[1:])
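For comparison, the dict-style aggregation above can also be written with an explicit column expression, which avoids the withColumnRenamed step; a minimal sketch assuming the same `data` frame and an illustrative cutoff of 10:

from pyspark.sql import functions as F

# Same result as the dict form: mean rating per movie, highest first.
result = (data
          .groupBy("movieid")
          .agg(F.avg("rating").alias("avg_ratings"))
          .dropna()
          .orderBy(F.desc("avg_ratings"))
          .limit(10))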
2.4375
2
infra_macros/fbcode_macros/tests/shell_test.py
xw285cornell/buckit
0
11563
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.

from __future__ import absolute_import, division, print_function, unicode_literals

import shlex

import tests.utils


class ShellTest(tests.utils.TestCase):
    includes = [("@fbcode_macros//build_defs:shell.bzl", "shell")]

    @tests.utils.with_project()
    def test_split_works_like_shlex_split(self, root):
        test_strings = [
            r"",
            r"FOO BAR",
            " foo \t\nbar\n baz",
            r'foo -D"bar"',
            r'foo -D"\"something quoted\"" last\ string',
            r'foo -D"\n contains backslash still" ',
            r"""foo -D'something something \"dark side\"'""",
            r"""-DFOO -D"\ B'A'R=\"something here\""'something" else' -D\ BAZ -D\\some""",
            r'''-DFOO -DBAR="baz \"\\\"lots of quotes\\\"\""''',
        ]
        commands = ["shell.split(%r)" % s.encode("ascii") for s in test_strings]
        expected = [shlex.split(s) for s in test_strings]
        result = root.runUnitTests(self.includes, commands)
        self.assertSuccess(result)
        self.assertEqual(
            expected,
            [[x.encode("utf-8") for x in line] for line in result.debug_lines]
        )
2.40625
2
bash/src/_func_storage.py
BillGatesCat/yf
19
11564
class _FuncStorage:
    def __init__(self):
        self._function_map = {}

    def insert_function(self, name, function):
        self._function_map[name] = function

    def get_all_functions(self):
        return self._function_map
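A minimal usage sketch for the registry above (the function name and lambda are illustrative, not from the repo):

# Register a callable under a name, then look it up and call it.
storage = _FuncStorage()
storage.insert_function("double", lambda x: x * 2)
print(storage.get_all_functions()["double"](21))  # -> 42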
2.390625
2
tests/unit/core/SubdomainTest.py
edgargmartinez/OpenPNM
3
11565
import openpnm as op import scipy as sp import pytest class SubdomainTest: def setup_class(self): ws = op.Workspace() ws.settings['local_data'] = True self.net = op.network.Cubic(shape=[3, 3, 3]) self.geo = op.geometry.GenericGeometry(network=self.net, pores=self.net.Ps, throats=self.net.Ts) self.geo['pore.diameter'] = sp.rand(self.net.Np) self.geo.add_model(propname='pore.volume', model=op.models.geometry.pore_volume.sphere) self.geo['throat.diameter'] = sp.rand(self.net.Nt) self.geo.add_model(propname='throat.area', model=op.models.geometry.throat_area.cylinder) self.geo.regenerate_models() self.phase1 = op.phases.GenericPhase(network=self.net) self.phase2 = op.phases.GenericPhase(network=self.net) self.phys1 = op.physics.GenericPhysics(network=self.net, geometry=self.geo, phase=self.phase1) self.phys1['pore.blah'] = 1.0 self.phys2 = op.physics.GenericPhysics(network=self.net, geometry=self.geo, phase=self.phase2) self.phys2['pore.blah'] = 2.0 def teardown_class(self): ws = op.Workspace() ws.clear() def test_drop_locations_from_geom_successively_with_single_geometry(self): assert self.geo.Np == 27 assert self.geo.Nt == 54 self.geo._drop_locations(pores=[0, 1, 2], throats=[0, 1, 2]) assert self.geo.Np == 24 assert self.geo.Nt == 51 self.geo._drop_locations(pores=[3, 4], throats=[3, 4]) assert self.geo.Np == 22 assert self.geo.Nt == 49 self.geo._add_locations(pores=[0, 1, 2, 3, 4], throats=[0, 1, 2, 3, 4]) assert self.geo.Np == 27 assert self.geo.Nt == 54 def test_drop_locations_from_physics_successively_with_two_physics(self): assert self.phys1.Np == 27 assert self.phys1.Nt == 54 self.phys1._drop_locations(pores=[0, 1], throats=[0, 1]) assert self.phys1.Np == 25 assert self.phys1.Nt == 52 self.phys1._drop_locations(pores=[3, 4], throats=[3, 4]) assert self.phys1.Np == 23 assert self.phys1.Nt == 50 self.phys1._add_locations(pores=[0, 1, 3, 4], throats=[0, 1, 3, 4]) assert self.phys1.Np == 27 assert self.phys1.Nt == 54 def test_drop_locations_all_but_not_complete(self): assert self.phys1.Np == 27 assert self.phys1.Nt == 54 assert 'pore.'+self.phys1.name in self.phase1.keys() assert 'throat.'+self.phys1.name in self.phase1.keys() self.phys1._drop_locations(pores=self.net.Ps) assert 'pore.'+self.phys1.name in self.phase1.keys() assert self.phase1.num_pores(self.phys1.name) == 0 assert 'throat.'+self.phys1.name in self.phase1.keys() self.phys1._drop_locations(throats=self.net.Ts) assert 'throat.'+self.phys1.name in self.phase1.keys() assert self.phase1.num_throats(self.phys1.name) == 0 self.phys1._add_locations(pores=self.net.Ps, throats=self.net.Ts) def test_writting_subdict_names_across_subdomains(self): ws = op.Workspace() proj = ws.new_project() pn = op.network.Cubic(shape=[10, 10, 10], spacing=1e-4, project=proj) Ps = pn['pore.coords'][:, 0] < pn['pore.coords'][:, 0].mean() Ts = pn.find_neighbor_throats(pores=Ps, mode='xnor') geo1 = op.geometry.StickAndBall(network=pn, pores=Ps, throats=Ts) Ps = pn['pore.coords'][:, 0] >= pn['pore.coords'][:, 0].mean() Ts = pn.find_neighbor_throats(pores=Ps, mode='or') geo2 = op.geometry.StickAndBall(network=pn, pores=Ps, throats=Ts) pn['pore.foo'] = 1 # Can't create a subdict below foo with pytest.raises(Exception): pn['pore.foo.bar'] = 1 # Can create a subdict directly pn['pore.baz.bar'] = 2 # Can't create a new item already used as subdict with pytest.raises(Exception): pn['pore.baz'] = 2 # Also works on subdomains geo1['pore.blah'] = 1 with pytest.raises(Exception): geo1['pore.blah.boo'] = 1 geo1['pore.bee.bop'] = 1 with pytest.raises(Exception): 
geo1['pore.bee'] = 1 # Now start looking across objects with pytest.raises(Exception): geo1['pore.foo'] = 1 # Already exists on pn with pytest.raises(Exception): geo1['pore.foo.bar'] = 1 # pore.foo already exists on pn with pytest.raises(Exception): geo1['pore.baz'] = 1 # pore.baz.bar already exists on pn # Now start looking across objects geo2['pore.blah'] = 1 geo2['pore.bee.bop'] = 1 with pytest.raises(Exception): geo1['pore.bee'] = 1 with pytest.raises(Exception): pn['pore.bee'] = 1 with pytest.raises(Exception): pn['pore.bee.bop'] = 1 if __name__ == '__main__': t = SubdomainTest() self = t t.setup_class() for item in t.__dir__(): if item.startswith('test'): print('running test: '+item) t.__getattribute__(item)()
2.078125
2
loci/io.py
SLIPO-EU/loci
3
11566
import pandas as pd from shapely.geometry import Point import geopandas as gpd import math import osmnx import requests from io import BytesIO from zipfile import ZipFile def read_poi_csv(input_file, col_id='id', col_name='name', col_lon='lon', col_lat='lat', col_kwds='kwds', col_sep=';', kwds_sep=',', source_crs='EPSG:4326', target_crs='EPSG:4326', keep_other_cols=False): """Creates a POI GeoDataFrame from an input CSV file. Args: input_file (string): Path to the input csv file. col_id (string): Name of the column containing the POI id (default: `id`). col_name (string): Name of the column containing the POI name (default: `name`). col_lon (string): Name of the column containing the POI longitude (default: `lon`). col_lat (string): Name of the column containing the POI latitude (default: `lat`). col_kwds (string): Name of the column containing the POI keywords (default: `kwds`). col_sep (string): Column delimiter (default: `;`). kwds_sep (string): Keywords delimiter (default: `,`). source_crs (string): Coordinate Reference System of input data (default: `EPSG:4326`). target_crs (string): Coordinate Reference System of the GeoDataFrame to be created (default: `EPSG:4326`). keep_other_cols (bool): Whether to keep the rest of the columns in the csv file (default: `False`). Returns: A POI GeoDataFrame with columns `id`, `name` and `kwds`. """ def lon_lat_to_point(row, c_lon, c_lat): try: x_lon = float(row[c_lon]) y_lat = float(row[c_lat]) if math.isnan(x_lon) is False and math.isnan(y_lat) is False: return Point(x_lon, y_lat) else: return float('NaN') except: return float('NaN') pois = pd.read_csv(input_file, delimiter=col_sep, error_bad_lines=False) init_poi_size = pois.index.size columns = list(pois) subset_cols = [] # Columns to Check for N/A, Nulls if keep_other_cols: subset_cols.extend(columns) else: subset_cols = [col_id, col_lon, col_lat] if col_name in columns: subset_cols.append(col_name) if col_kwds in columns: subset_cols.append(col_kwds) # Geometry Column(Uncleaned) pois['geometry'] = pois.apply(lambda row: lon_lat_to_point(row, col_lon, col_lat), axis=1) subset_cols.append('geometry') # Drop Columns Not in subset Columns. drop_columns = set(columns) - set(subset_cols) pois.drop(drop_columns, inplace=True, axis=1) # Drop all N/A, Null rows from DataFrame. pois.dropna(inplace=True) if init_poi_size - pois.index.size > 0: print("Skipped", (init_poi_size - pois.index.size), "rows due to errors.") if col_kwds in columns: pois[col_kwds] = pois[col_kwds].map(lambda s: s.split(kwds_sep)) source_crs = {'init': source_crs} target_crs = {'init': target_crs} pois = gpd.GeoDataFrame(pois, crs=source_crs, geometry=pois['geometry']).to_crs(target_crs).drop(columns=[col_lon, col_lat]) print('Loaded ' + str(len(pois.index)) + ' POIs.') return pois def import_osmnx(bound, target_crs='EPSG:4326'): """Creates a POI GeoDataFrame from POIs retrieved by OSMNX (https://github.com/gboeing/osmnx). Args: bound (polygon): A polygon to be used as filter. target_crs (string): Coordinate Reference System of the GeoDataFrame to be created (default: `EPSG:4326`). Returns: A POI GeoDataFrame with columns `id`, `name` and `kwds`. 
""" # retrieve pois pois = osmnx.pois.pois_from_polygon(bound) if len(pois.index) > 0: # filter pois pois = pois[pois.amenity.notnull()] pois_filter = pois.element_type == 'node' pois = pois[pois_filter] # restructure gdf subset_cols = ['osmid', 'amenity', 'name', 'geometry'] columns = list(pois) drop_columns = set(columns) - set(subset_cols) pois.drop(drop_columns, inplace=True, axis=1) pois = pois.reset_index(drop=True) pois = pois.rename(columns={'osmid': 'id', 'amenity': 'kwds'}) pois['kwds'] = pois['kwds'].map(lambda s: [s]) if target_crs != 'EPSG:4326': target_crs = {'init': target_crs} pois = pois.to_crs(target_crs) print('Loaded ' + str(len(pois.index)) + ' POIs.') return pois def import_osmwrangle(osmwrangle_file, target_crs='EPSG:4326', bound=None): """Creates a POI GeoDataFrame from a file produced by OSMWrangle (https://github.com/SLIPO-EU/OSMWrangle). Args: osmwrangle_file (string): Path or URL to the input csv file. target_crs (string): Coordinate Reference System of the GeoDataFrame to be created (default: `EPSG:4326`). bound (polygon): A polygon to be used as filter. Returns: A POI GeoDataFrame with columns `id`, `name` and `kwds`. """ def lon_lat_to_point(row, c_lon, c_lat): x_lon = float(row[c_lon]) y_lat = float(row[c_lat]) if math.isnan(x_lon) is False and math.isnan(y_lat) is False: return Point(x_lon, y_lat) else: return float('NaN') col_sep = '|' col_id = 'ID' col_lon = 'LON' col_lat = 'LAT' col_name = 'NAME' col_cat = 'CATEGORY' col_subcat = 'SUBCATEGORY' source_crs = {'init': 'EPSG:4326'} # Load the file if osmwrangle_file.startswith('http') and osmwrangle_file.endswith('.zip'): response = requests.get(osmwrangle_file) zip_file = ZipFile(BytesIO(response.content)) with zip_file.open(zip_file.namelist()[0]) as csvfile: pois = pd.read_csv(csvfile, delimiter=col_sep, error_bad_lines=False) else: pois = pd.read_csv(osmwrangle_file, delimiter=col_sep, error_bad_lines=False) init_poi_size = pois.index.size columns = list(pois) subset_cols = [col_id, col_name, 'kwds', col_lon, col_lat] # Geometry Column(Uncleaned) pois['geometry'] = pois.apply(lambda row: lon_lat_to_point(row, col_lon, col_lat), axis=1) subset_cols.append('geometry') pois['kwds'] = pois[col_cat] + ',' + pois[col_subcat] pois['kwds'] = pois['kwds'].map(lambda s: s.split(',')) # Drop Columns Not in subset Columns. drop_columns = set(columns) - set(subset_cols) pois.drop(drop_columns, inplace=True, axis=1) # Drop all N/A, Null rows from DataFrame. pois.dropna(inplace=True) if init_poi_size - pois.index.size > 0: print("Skipped", (init_poi_size - pois.index.size), "rows due to errors.") pois = pois.rename(columns={col_id: 'id', col_name: 'name'}) pois = gpd.GeoDataFrame(pois, crs=source_crs, geometry=pois['geometry']).drop(columns=[col_lon, col_lat]) # Check whether location filter should be applied if bound is not None: spatial_filter = pois.geometry.intersects(bound) pois = pois[spatial_filter] if target_crs != 'EPSG:4326': target_crs = {'init': target_crs} pois = pois.to_crs(target_crs) print('Loaded ' + str(len(pois.index)) + ' POIs.') return pois def retrieve_osm_loc(name, buffer_dist=0): """Retrieves a polygon from an OSM location. Args: name (string): Name of the location to be resolved. buffer_dist (numeric): Buffer distance in meters. Returns: A polygon. """ geom = osmnx.core.gdf_from_place(name, buffer_dist=buffer_dist) if len(geom.index) > 0: geom = geom.iloc[0].geometry else: geom = None return geom def to_geojson(gdf, output_file): """Exports a GeoDataFrame to a GeoJSON file. 
Args: gdf (GeoDataFrame): The GeoDataFrame object to be exported. output_file (string): Path to the output file. """ gdf.to_file(output_file, driver='GeoJSON')
3.15625
3
src/roles/wolf.py
timson622222/lykos
0
11567
import re import random from collections import defaultdict import src.settings as var from src.utilities import * from src import debuglog, errlog, plog from src.decorators import cmd, event_listener from src.messages import messages from src.events import Event KILLS = {} # type: Dict[str, List[str]] @cmd("kill", chan=False, pm=True, playing=True, phases=("night",)) def wolf_kill(cli, nick, chan, rest): """Kills one or more players as a wolf.""" role = get_role(nick) # eventually cub will listen on targeted_command and block kills that way if role not in var.WOLF_ROLES - {"wolf cub"}: return if nick in var.SILENCED: pm(cli, nick, messages["silenced"]) return if var.DISEASED_WOLVES: pm(cli, nick, messages["ill_wolves"]) return # eventually crow will listen on targeted_command and block kills that way # (or more likely, that restriction will be lifted and crow can do both) if role == "werecrow" and var.OBSERVED.get(nick): pm(cli, nick, messages["werecrow_transformed_nokill"]) return pieces = re.split(" +", rest) victims = [] orig = [] num_kills = 1 if var.ANGRY_WOLVES: num_kills = 2 i = 0 extra = 0 while i < num_kills + extra: try: victim = pieces[i] except IndexError: break if victim.lower() == "and": extra += 1 i += 1 victim = pieces[i] victim = get_victim(cli, nick, victim, False) if not victim: return if victim == nick: pm(cli, nick, messages["no_suicide"]) return if in_wolflist(nick, victim): pm(cli, nick, messages["wolf_no_target_wolf"]) return orig.append(victim) evt = Event("targeted_command", {"target": victim, "misdirection": True, "exchange": True}) evt.dispatch(cli, var, "kill", nick, victim, frozenset({"detrimental"})) if evt.prevent_default: return victim = evt.data["target"] victims.append(victim) i += 1 if len(set(victims)) < len(victims): pm(cli, nick, messages["wolf_must_target_multiple"]) return KILLS[nick] = victims if len(orig) > 1: # need to expand this eventually msg = messages["wolf_target_multiple"].format(orig[0], orig[1]) pm(cli, nick, messages["player"].format(msg)) debuglog("{0} ({1}) KILL: {2} ({3}) and {4} ({5})".format(nick, role, victims[0], get_role(victims[0]), victims[1], get_role(victims[1]))) else: msg = messages["wolf_target"].format(orig[0]) pm(cli, nick, messages["player"].format(msg)) if num_kills > 1: pm(cli, nick, messages["wolf_target_second"]) debuglog("{0} ({1}) KILL: {2} ({3})".format(nick, role, victims[0], get_role(victims[0]))) if in_wolflist(nick, nick): relay_wolfchat_command(cli, nick, messages["wolfchat"].format(nick, msg), var.WOLF_ROLES, is_wolf_command=True, is_kill_command=True) chk_nightdone(cli) @cmd("retract", "r", chan=False, pm=True, playing=True, phases=("night",)) def wolf_retract(cli, nick, chan, rest): """Removes a wolf's kill selection.""" if nick not in KILLS: return del KILLS[nick] pm(cli, nick, messages["retracted_kill"]) relay_wolfchat_command(cli, nick, messages["wolfchat_retracted_kill"].format(nick), var.WOLF_ROLES, is_wolf_command=True, is_kill_command=True) @event_listener("del_player") def on_del_player(evt, cli, var, nick, nickrole, nicktpls, death_triggers): for a,b in list(KILLS.items()): for n in b: if n == nick: KILLS[a].remove(nick) if a == nick or len(KILLS[a]) == 0: del KILLS[a] @event_listener("rename_player") def on_rename(evt, cli, var, prefix, nick): kvp = [] for a,b in KILLS.items(): nl = [] for n in b: if n == prefix: n = nick nl.append(n) if a == prefix: a = nick kvp.append((a,nl)) KILLS.update(kvp) if prefix in KILLS: del KILLS[prefix] @event_listener("night_acted") def on_acted(evt, cli, var, 
nick, sender): if nick in KILLS: evt.data["acted"] = True @event_listener("transition_day", priority=1) def on_transition_day(evt, cli, var): # figure out wolf target found = defaultdict(int) # split off into event + wolfcub.py num_kills = 1 if var.ANGRY_WOLVES: num_kills = 2 for v in KILLS.values(): for p in v: if p: # kill target starting with ! is invalid # right now nothing does this, but monster eventually will if p[0] == "!": continue found[p] += 1 for i in range(num_kills): maxc = 0 dups = [] for v, c in found.items(): if c > maxc: maxc = c dups = [v] elif c == maxc: dups.append(v) if maxc and dups: victim = random.choice(dups) evt.data["victims"].append(victim) evt.data["bywolves"].add(victim) evt.data["onlybywolves"].add(victim) # special key to let us know to randomly select a wolf in case of retribution totem evt.data["killers"][victim].append("@wolves") del found[victim] # this should be moved to an event in kill, where monster prefixes their nick with ! # and fallen angel subsequently removes the ! prefix if len(var.ROLES["fallen angel"]) == 0: for monster in var.ROLES["monster"]: if monster in victims: evt.data["victims"].remove(monster) evt.data["bywolves"].discard(monster) evt.data["onlybywolves"].discard(monster) @event_listener("exchange_roles") def on_exchange(evt, cli, var, actor, nick, actor_role, nick_role): if actor in KILLS: del KILLS[actor] if nick in KILLS: del KILLS[nick] @event_listener("chk_nightdone", priority=3) def on_chk_nightdone(evt, cli, var): if not var.DISEASED_WOLVES: evt.data["actedcount"] += len(KILLS) # eventually wolf cub will remove itself from nightroles in wolfcub.py evt.data["nightroles"].extend(list_players(var.WOLF_ROLES - {"wolf cub"})) @event_listener("chk_nightdone", priority=20) def on_chk_nightdone2(evt, cli, var): if not evt.prevent_default and not var.DISEASED_WOLVES: # flatten KILLS kills = set() for ls in KILLS.values(): kills.update(ls) # check if wolves are actually agreeing # allow len(kills) == 0 through as that means that crow was dumb and observed instead if not var.ANGRY_WOLVES and len(kills) > 1: evt.data["actedcount"] -= 1 elif var.ANGRY_WOLVES and (len(kills) == 1 or len(kills) > 2): evt.data["actedcount"] -= 1 @event_listener("transition_night_end", priority=2) def on_transition_night_end(evt, cli, var): ps = list_players() wolves = list_players(var.WOLFCHAT_ROLES) # roles in wolfchat (including those that can only listen in but not speak) wcroles = var.WOLFCHAT_ROLES # roles allowed to talk in wolfchat talkroles = var.WOLFCHAT_ROLES # condition imposed on talking in wolfchat (only during day/night, or None if talking is disabled) wccond = "" if var.RESTRICT_WOLFCHAT & var.RW_DISABLE_NIGHT: if var.RESTRICT_WOLFCHAT & var.RW_DISABLE_DAY: wccond = None else: wccond = " during day" elif var.RESTRICT_WOLFCHAT & var.RW_DISABLE_DAY: wccond = " during night" if var.RESTRICT_WOLFCHAT & var.RW_REM_NON_WOLVES: if var.RESTRICT_WOLFCHAT & var.RW_TRAITOR_NON_WOLF: wcroles = var.WOLF_ROLES talkroles = var.WOLF_ROLES else: wcroles = var.WOLF_ROLES | {"traitor"} talkroles = var.WOLF_ROLES | {"traitor"} elif var.RESTRICT_WOLFCHAT & var.RW_WOLVES_ONLY_CHAT: if var.RESTRICT_WOLFCHAT & var.RW_TRAITOR_NON_WOLF: talkroles = var.WOLF_ROLES else: talkroles = var.WOLF_ROLES | {"traitor"} for wolf in wolves: # should make the cursed information an event that cursedvillager can then add to # (e.g. 
an event to change what prefixes are sent with the role message, and a # 2nd event to change information in parens in player list) normal_notify = wolf in var.PLAYERS and not is_user_simple(wolf) role = get_role(wolf) cursed = "cursed " if wolf in var.ROLES["cursed villager"] and role in wcroles else "" if normal_notify: msg = "{0}_notify".format(role.replace(" ", "_")) cmsg = "cursed_" + msg try: if cursed: try: pm(cli, wolf, messages[cmsg]) except KeyError: pm(cli, wolf, messages[msg].format(cursed)) else: pm(cli, wolf, messages[msg].format(cursed)) except KeyError: # catchall in case we forgot something above an = 'n' if role.startswith(("a", "e", "i", "o", "u")) else "" pm(cli, wolf, messages["undefined_role_notify"].format(an, role)) if len(wolves) > 1 and wccond is not None and role in talkroles: pm(cli, wolf, messages["wolfchat_notify"].format(wccond)) else: an = "n" if cursed == "" and role.startswith(("a", "e", "i", "o", "u")) else "" pm(cli, wolf, messages["wolf_simple"].format(an, cursed, role)) # !simple pl = ps[:] random.shuffle(pl) pl.remove(wolf) # remove self from list if role in wcroles: for i, player in enumerate(pl): prole = get_role(player) if prole in wcroles: cursed = "" if player in var.ROLES["cursed villager"]: cursed = "cursed " pl[i] = "\u0002{0}\u0002 ({1}{2})".format(player, cursed, prole) elif player in var.ROLES["cursed villager"]: pl[i] = player + " (cursed)" elif role == "warlock": for i, player in enumerate(pl): if player in var.ROLES["cursed villager"]: pl[i] = player + " (cursed)" pm(cli, wolf, "Players: " + ", ".join(pl)) if role in var.WOLF_ROLES - {"wolf cub"} and var.DISEASED_WOLVES: pm(cli, wolf, messages["ill_wolves"]) # TODO: split the following out into their own files (mystic, cub and alpha) if role == "wolf mystic": # if adding this info to !myrole, you will need to save off this count so that they can't get updated info until the next night # # of special villagers = # of players - # of villagers - # of wolves - # of neutrals numvills = len(ps) - len(list_players(var.WOLFTEAM_ROLES)) - len(list_players(("villager", "vengeful ghost", "time lord", "amnesiac", "lycan"))) - len(list_players(var.TRUE_NEUTRAL_ROLES)) pm(cli, wolf, messages["wolf_mystic_info"].format("are" if numvills != 1 else "is", numvills, "s" if numvills != 1 else "")) if not var.DISEASED_WOLVES and var.ANGRY_WOLVES and role in var.WOLF_ROLES - {"wolf cub"}: pm(cli, wolf, messages["angry_wolves"]) if var.ALPHA_ENABLED and role == "alpha wolf" and wolf not in var.ALPHA_WOLVES: pm(cli, wolf, messages["wolf_bite"]) @event_listener("begin_day") def on_begin_day(evt, cli, var): KILLS.clear() @event_listener("reset") def on_reset(evt, var): KILLS.clear() # vim: set sw=4 expandtab:
2.34375
2
BOG.py
punyajoy/biosbias
0
11568
<filename>BOG.py
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 13:52:20 2020

@author: midas
"""

import os
import glob
import pandas as pd
import numpy as np

all_filenames = ['Data/Train.csv', 'Data/Test.csv']
combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames])
combined_csv.to_csv("combined_csv.csv", index=False, encoding='utf-8-sig')

from tqdm import tqdm_notebook, tqdm
from sklearn import preprocessing

train_data = pd.read_csv("Data/Train.csv")
test_data = pd.read_csv("Data/Test.csv")

train_wo_g = []
train_w_g = []
test_wo_g = []
test_w_g = []

combined_csv  # bare expression left over from interactive use; no effect in a script

for index, row in tqdm(combined_csv.iterrows()):
    try:
        index_to_start = int(row['start_pos'])
    except:
        continue
    tuple1 = [row['raw'][index_to_start:], row['title'], row['gender']]
    tuple2 = [row['bio'][index_to_start:], row['title'], row['gender']]
    train_w_g.append(tuple1)
    train_wo_g.append(tuple2)

TrainTestWithGen = pd.DataFrame(train_w_g, columns=['Text', 'title', 'gender'])
TrainTestWithoutGen = pd.DataFrame(train_wo_g, columns=['Text', 'title', 'gender'])

# Cleaning the texts
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

corpus = []
for i in range(0, 74595):
    review = re.sub('[^a-zA-Z]', ' ', TrainTestWithGen['Text'][i])
    review = review.lower()
    review = review.split()
    ps = PorterStemmer()
    review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
    review = ' '.join(review)
    corpus.append(review)

# Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features=30000)
X = cv.fit_transform(corpus).toarray()
X_all = pd.DataFrame(X)
X_all['title'] = TrainTestWithGen['title']
X_all['gender'] = TrainTestWithGen['gender']
X_Train = X_all[:53754]
X_Test = X_all[53754:]
X_Train.to_csv('Train_With_Gen.csv')
X_Test.to_csv('Test_With_Gen.csv')

# Without Gender
corpus2 = []
for i in range(0, len(TrainTestWithoutGen)):
    review = re.sub('[^a-zA-Z]', ' ', TrainTestWithoutGen['Text'][i])
    review = review.lower()
    review = review.split()
    ps = PorterStemmer()
    review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
    review = ' '.join(review)
    corpus2.append(review)

# Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv2 = CountVectorizer(max_features=30000)
X2 = cv2.fit_transform(corpus2).toarray()
X_all2 = pd.DataFrame(X2)
X_all2['title'] = TrainTestWithoutGen['title']
X_all2['gender'] = TrainTestWithoutGen['gender']
X_Train2 = X_all2[:53754]
X_Test2 = X_all2[53754:]
X_Train2.to_csv('Train_WithOut_Gen.csv')
X_Test2.to_csv('Test_WithOut_Gen.csv')
2.578125
3
src/ploomber/jupyter/manager.py
idomic/ploomber
0
11569
<reponame>idomic/ploomber """ Module for the jupyter extension """ import sys import datetime import os import contextlib from pprint import pprint from pathlib import Path from jupytext.contentsmanager import TextFileContentsManager from ploomber.sources.notebooksource import (_cleanup_rendered_nb, inject_cell) from ploomber.spec.dagspec import DAGSpec from ploomber.exceptions import DAGSpecInitializationError from ploomber.cli import parsers from ploomber.jupyter.dag import JupyterDAGManager @contextlib.contextmanager def chdir(directory): old_dir = os.getcwd() try: os.chdir(str(directory)) yield finally: os.chdir(old_dir) def resolve_path(parent, path): """ Functions functions resolves paths to make the {source} -> {task} mapping work even then `jupyter notebook` is initialized from a subdirectory of pipeline.yaml """ try: # FIXME: remove :linenumber return Path(parent, path).relative_to(Path('.').resolve()).as_posix().strip() except ValueError: return None def check_metadata_filter(log, model): try: cell_metadata_filter = ( model['content']['metadata']['jupytext']['cell_metadata_filter']) except Exception: cell_metadata_filter = None if cell_metadata_filter == '-all': log.warning('Your notebook has filter that strips out ' 'cell metadata when saving it from the Jupyter notebook ' 'app. This will prevent you from tagging your ' '"parameters" cell. It is possible that this comes ' 'from jupytext defaults, either add the tag by ' 'editing the notebook in a text editor or enable ' 'metadata in the Jupyter app: File -> Jupytext -> ' 'Include metadata') class PloomberContentsManager(TextFileContentsManager): """ Ploomber content manager subclasses jupytext TextFileContentsManager to keep jupytext features of opening .py files as notebooks but adds a feature that automatically injects parameters in notebooks if they are part of a pipeline defined in pipeline.yaml, these injected parameters are deleted before saving the file """ restart_msg = (' Fix the issue and and restart "jupyter notebook"') def load_dag(self, starting_dir=None): if self.dag is None or self.spec['meta']['jupyter_hot_reload']: self.log.info('[Ploomber] Loading dag...') msg = ('[Ploomber] An error occured when trying to initialize ' 'the pipeline. Cells won\' be injected until your ' 'pipeline processes correctly. 
See error details below.') if self.spec and not self.spec['meta']['jupyter_hot_reload']: msg += self.restart_msg env_var = os.environ.get('ENTRY_POINT') try: if env_var: (self.spec, self.dag, self.path) = parsers.load_entry_point(env_var) else: hot_reload = (self.spec and self.spec['meta']['jupyter_hot_reload']) (self.spec, self.dag, self.path) = DAGSpec._auto_load(starting_dir=starting_dir, reload=hot_reload) except DAGSpecInitializationError: self.reset_dag() self.log.exception(msg) else: if self.dag is not None: current = os.getcwd() if self.spec['meta'][ 'jupyter_hot_reload'] and current not in sys.path: # jupyter does not add the current working dir by # default, if using hot reload and the dag loads # functions from local files, importlib.reload will # fail # NOTE: might be better to only add this when the dag # is actually loading from local files but that means # we have to run some logic and increases load_dag # running time, which we need to be fast sys.path.append(current) base_path = Path(self.path).resolve() with chdir(base_path): # this dag object won't be executed, forcing speeds # rendering up self.dag.render(force=True) if self.spec['meta']['jupyter_functions_as_notebooks']: self.manager = JupyterDAGManager(self.dag) else: self.manager = None tuples = [(resolve_path(base_path, t.source.loc), t) for t in self.dag.values() if t.source.loc is not None] self.dag_mapping = { t[0]: t[1] for t in tuples if t[0] is not None } self.log.info('[Ploomber] Initialized dag from ' 'pipeline.yaml at' ': {}'.format(base_path)) self.log.info('[Ploomber] Pipeline mapping: {}'.format( pprint(self.dag_mapping))) else: # no pipeline.yaml found... self.log.info('[Ploomber] No pipeline.yaml found, ' 'skipping DAG initialization...') self.dag_mapping = None def reset_dag(self): self.spec = None self.dag = None self.path = None self.dag_mapping = None self.manager = None def __init__(self, *args, **kwargs): """ Initialize the content manger, look for a pipeline.yaml file in the current directory, if there is one, load it, if there isn't one don't do anything """ self.reset_dag() # try to automatically locate the dag spec self.load_dag() return super(PloomberContentsManager, self).__init__(*args, **kwargs) def get(self, path, content=True, type=None, format=None): """ This is called when a file/directory is requested (even in the list view) """ # FIXME: reloading inside a (functions) folder causes 404 if content: self.load_dag() if self.manager and path in self.manager: return self.manager.get(path, content) model = super(PloomberContentsManager, self).get(path=path, content=content, type=type, format=format) # user requested directory listing, check if there are task functions # defined here if model['type'] == 'directory' and self.manager: if model['content']: model['content'].extend(self.manager.get_by_parent(path)) check_metadata_filter(self.log, model) # if opening a file (ignore file listing), load dag again if (model['content'] and model['type'] == 'notebook'): # Look for the pipeline.yaml file from the file we are rendering # and search recursively. 
This is required to cover the case when # pipeline.yaml is in a subdirectory from the folder where the # user executed "jupyter notebook" # FIXME: we actually don't need to reload the dag again, we just # have to rebuild the mapping to make _model_in_dag work self.load_dag(starting_dir=Path(os.getcwd(), model['path']).parent) if self._model_in_dag(model): self.log.info('[Ploomber] Injecting cell...') inject_cell(model=model, params=self.dag_mapping[model['path']]._params) return model def save(self, model, path=""): """ This is called when a file is saved """ if self.manager and path in self.manager: out = self.manager.overwrite(model, path) return out else: check_metadata_filter(self.log, model) # not sure what's the difference between model['path'] and path # but path has leading "/", _model_in_dag strips it key = self._model_in_dag(model, path) if key: self.log.info( '[Ploomber] Cleaning up injected cell in {}...'.format( model.get('name') or '')) model['content'] = _cleanup_rendered_nb(model['content']) self.log.info("[Ploomber] Deleting product's metadata...") self.dag_mapping[key].product.metadata.delete() return super(PloomberContentsManager, self).save(model, path) def _model_in_dag(self, model, path=None): """Determine if the model is part of the pipeline """ model_in_dag = False if path is None: path = model['path'] else: path = path.strip('/') if self.dag: if ('content' in model and model['type'] == 'notebook'): if path in self.dag_mapping: # NOTE: not sure why sometimes the model comes with a # name and sometimes it doesn't self.log.info( '[Ploomber] {} is part of the pipeline... '.format( model.get('name') or '')) model_in_dag = True else: self.log.info('[Ploomber] {} is not part of the pipeline, ' 'skipping...'.format( model.get('name') or '')) return path if model_in_dag else False def list_checkpoints(self, path): if not self.manager or path not in self.manager: return self.checkpoints.list_checkpoints(path) def create_checkpoint(self, path): if not self.manager or path not in self.manager: return self.checkpoints.create_checkpoint(self, path) else: return { 'id': 'checkpoint', 'last_modified': datetime.datetime.now() } def _load_jupyter_server_extension(app): """ This function is called to configure the new content manager, there are a lot of quirks that jupytext maintainers had to solve to make it work so we base our implementation on theirs: https://github.com/mwouts/jupytext/blob/bc1b15935e096c280b6630f45e65c331f04f7d9c/jupytext/__init__.py#L19 """ if isinstance(app.contents_manager_class, PloomberContentsManager): app.log.info("[Ploomber] NotebookApp.contents_manager_class " "is a subclass of PloomberContentsManager already - OK") return # The server extension call is too late! # The contents manager was set at NotebookApp.init_configurables # Let's change the contents manager class app.log.info('[Ploomber] setting content manager ' 'to PloomberContentsManager') app.contents_manager_class = PloomberContentsManager try: # And re-run selected init steps from: # https://github.com/jupyter/notebook/blob/ # 132f27306522b32fa667a6b208034cb7a04025c9/notebook/notebookapp.py#L1634-L1638 app.contents_manager = app.contents_manager_class(parent=app, log=app.log) app.session_manager.contents_manager = app.contents_manager app.web_app.settings["contents_manager"] = app.contents_manager except Exception: error = """[Ploomber] An error occured. 
Please deactivate the server extension with "jupyter serverextension disable ploomber" and configure the contents manager manually by adding c.NotebookApp.contents_manager_class = "ploomber.jupyter.PloomberContentsManager" to your .jupyter/jupyter_notebook_config.py file. """ # noqa app.log.error(error) raise
1.921875
2
quake_reporter/quake_datafeed.py
shandozer/quake_reporter
0
11570
<filename>quake_reporter/quake_datafeed.py
#!/usr/bin/env python
"""
__author__ = <NAME>, 10/8/16

Python 2.7.x
"""

import json
import urllib2
import datetime
import argparse

VERSION = '0.2.1'


def get_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--magnitude', action="store", type=float,
                        help='Please enter minimum magnitude desired: 1.0, 2.5, or 4.5',
                        default=2.5)
    parser.add_argument('-t', '--timeframe', action="store",
                        choices=['hour', 'day', 'week', 'month'],
                        help='Collect data over the last hour, day, week, or month.')
    parser.add_argument('-s', '--savejson', action="store_true",
                        help='Use this flag to save output to a .json')
    return parser


def get_data_from_api(url):
    page = urllib2.urlopen(url)
    data = page.read()
    return data


def save_json_data(data, req_details):
    with open('quake_request_{}_{:%Y_%m_%d_%H:%M}.json'.format(req_details, datetime.datetime.now()), 'wb') as f:
        json.dump(data, f)


def print_results(data, magnitude):
    json_data = json.loads(data)
    if 'title' in json_data['metadata']:
        print json_data['metadata']['title']
    count = json_data['metadata']['count']
    print '\n--> {} events found in the {}\n'.format(str(count), json_data['metadata']['title'].split(', ')[1])
    tsunami_quakes = [quake for quake in json_data['features'] if quake['properties']['tsunami'] == 1]
    tsunami_count = len(tsunami_quakes)
    if tsunami_count > 0:
        print "\t{} of these caused TSUNAMI\n".format(tsunami_count)
    sorted_json = sorted(json_data['features'], key=lambda k: k['properties'].get('time', 0), reverse=True)
    for i in sorted_json:
        print '*' * 18 + '\n'
        if i['properties']['time']:
            local_quake_time = i['properties']['time']
            quake_date = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=local_quake_time)
            print 'Date of Quake: {}'.format(quake_date.strftime('%m-%d-%Y %H:%M:%S'))
            time_since_quake = datetime.timedelta() - datetime.timedelta(days=-quake_date.day,
                                                                         hours=quake_date.hour,
                                                                         minutes=quake_date.minute,
                                                                         seconds=quake_date.second)
        if i['properties']['tsunami'] == 1:
            print "\n\t_/*~~~ TSUNAMI CREATED! ~~~*\_\n"
        if i['properties']['mag']:
            print '%2.1f' % i['properties']['mag'] + ',', i['properties']['place'], '\n'
        print 'Depth: ' + str(i['geometry']['coordinates'][2]) + 'km'
    print '*' * 20


def main():
    parser = get_parser()
    args = parser.parse_args()
    intro_statement = '\n\nSearching for Global Earthquake Events'
    if args.timeframe:
        t = args.timeframe
        intro_statement += ' within the last {}...'.format(t)
    else:
        intro_statement += ' (No timespan selected, using default: 1 week)'
        t = 'week'
    print intro_statement
    if args.magnitude:
        mag = args.magnitude
        print '\nMagnitude requested: {}'.format(mag)
        if mag >= 4.5:
            mag = 4.5
        elif mag > 2.5:
            mag = 2.5
        else:
            mag = 1.0  # anything less than 2.5 gets the 1.0+ range
    else:
        print '\nNo Magnitude requested, using default... (2.5+)'
        mag = 2.5  # a medium sized default
    # Now grab your data
    api_url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/{}_{}.geojson'.format(mag, t)
    try:
        data = get_data_from_api(api_url)
    except urllib2.URLError:
        print '\nUH OH! We were unable to extract any data! \n\n\t-->Check your Internet/WiFi Access? '
        exit(1)
    if data and args.savejson:
        request_params = '{}mag-1{}'.format(mag, t)
        save_json_data(data, request_params)
    elif data:
        print_results(data, mag)


if __name__ == '__main__':
    main()
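A short sketch of how the parser above maps flags to values (the argument values are illustrative):

# Hypothetical invocation: magnitude floor 4.5 over the last day.
parser = get_parser()
args = parser.parse_args(['-m', '4.5', '-t', 'day'])
assert args.magnitude == 4.5 and args.timeframe == 'day'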
3.1875
3
tests/visualization/data_sources/test_satellite.py
openclimatefix/nowcasting_utils
2
11571
<reponame>openclimatefix/nowcasting_utils<gh_stars>1-10
""" Tests to plot satellite data """
import os

import plotly.graph_objects as go
from nowcasting_dataset.data_sources.fake.batch import satellite_fake
from nowcasting_dataset.geospatial import osgb_to_lat_lon

from nowcasting_utils.visualization.data_sources.plot_satellite import (
    make_animation_all_channels,
    make_animation_one_channels,
    make_traces_one_channel,
    make_traces_one_channel_one_time,
)
from nowcasting_utils.visualization.utils import make_buttons


def test_make_traces_one_channel_one_time(configuration):
    """Test 'make_traces_one_channel_one_time' functions"""
    satellite = satellite_fake(configuration=configuration)
    example_index = 1
    trace = make_traces_one_channel_one_time(
        satellite=satellite, example_index=example_index, channel_index=0, time_index=1
    )
    fig = go.Figure(trace)
    x = satellite.x[example_index].mean()
    y = satellite.y[example_index].mean()
    lat, lon = osgb_to_lat_lon(x=x, y=y)
    fig.update_layout(
        mapbox_style="carto-positron", mapbox_zoom=7, mapbox_center={"lat": lat, "lon": lon}
    )
    if "CI" not in os.environ.keys():
        fig.show(renderer="browser")


def test_make_traces_one_channel(configuration):
    """Test 'make_traces_one_channel' functions"""
    satellite = satellite_fake(configuration=configuration)
    example_index = 1
    traces = make_traces_one_channel(
        satellite=satellite, example_index=example_index, channel_index=0
    )
    x = satellite.x[example_index].mean()
    y = satellite.y[example_index].mean()
    lat, lon = osgb_to_lat_lon(x=x, y=y)
    frames = []
    for i, trace in enumerate(traces[1:]):
        frames.append(go.Frame(data=trace, name=f"frame{i+1}"))
    fig = go.Figure(
        data=traces[0],
        layout=go.Layout(
            title="Start Title",
        ),
        frames=frames,
    )
    fig.update_layout(updatemenus=[make_buttons()])
    fig.update_layout(
        mapbox_style="carto-positron", mapbox_zoom=7, mapbox_center={"lat": lat, "lon": lon}
    )
    if "CI" not in os.environ.keys():
        fig.show(renderer="browser")


def test_make_animation_one_channels(configuration):
    """Test 'make_animation_one_channels' functions"""
    satellite = satellite_fake(configuration=configuration)
    fig = make_animation_one_channels(satellite=satellite, example_index=1, channel_index=0)
    if "CI" not in os.environ.keys():
        fig.show(renderer="browser")


def test_make_animation_all_channels(configuration):
    """Test 'make_animation_all_channels' functions"""
    satellite = satellite_fake(configuration=configuration)
    fig = make_animation_all_channels(satellite=satellite, example_index=0)
    if "CI" not in os.environ.keys():
        fig.show(renderer="browser")
2.25
2
owtf/__main__.py
Udbhavbisarya23/owtf
1514
11572
<gh_stars>1000+
"""
owtf.__main__
~~~~~~~~~~~~~

A __main__ method for OWTF so that internal services can be called as Python modules.
"""
import sys

from owtf.core import main

if __name__ == "__main__":
    main()
1.453125
1
sympyosis/logger.py
ZechCodes/sympyosis
0
11573
<filename>sympyosis/logger.py
from enum import IntEnum
from typing import Type, TypeVar
import logging

T = TypeVar("T")


class LogLevel(IntEnum):
    DEBUG = logging.DEBUG
    INFO = logging.INFO
    WARNING = logging.WARNING
    ERROR = logging.ERROR
    CRITICAL = logging.CRITICAL

    @classmethod
    def get(cls: Type[T], name: str) -> T:
        return getattr(cls, name.upper())


class Logger:
    def __init__(
        self, name: str, level: LogLevel, *, parent: logging.Logger | None = None
    ):
        self._name = name
        self._level = level
        self._parent = parent
        if parent:
            self._logger = self._parent.getChild(self._name)
        else:
            self._logger = logging.getLogger(name)
        self.set_level(self._level)

    def log(self, message: str, level: LogLevel, *args, **kwargs):
        self._logger.log(level, message, *args, **kwargs)

    def debug(self, message: str, *args, **kwargs):
        self.log(message, LogLevel.DEBUG, *args, **kwargs)

    def info(self, message: str, *args, **kwargs):
        self.log(message, LogLevel.INFO, *args, **kwargs)

    def warning(self, message: str, *args, **kwargs):
        self.log(message, LogLevel.WARNING, *args, **kwargs)

    def error(self, message: str, *args, **kwargs):
        self.log(message, LogLevel.ERROR, *args, **kwargs)

    def critical(self, message: str, *args, **kwargs):
        self.log(message, LogLevel.CRITICAL, *args, **kwargs)

    def set_level(self, level: LogLevel):
        self._level = level
        self._logger.setLevel(level)

    def create_child_logger(self, name: str, level: LogLevel | None = None):
        return Logger(name, level or self._level, parent=self._logger)

    @staticmethod
    def initialize_loggers(level: LogLevel = LogLevel.ERROR):
        logging.basicConfig(level=level)
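A minimal usage sketch for the wrapper above (the logger names are illustrative):

# Root logger plus a child routed through logging.getLogger("sympyosis").getChild("service").
Logger.initialize_loggers(LogLevel.INFO)
root = Logger("sympyosis", LogLevel.INFO)
child = root.create_child_logger("service")
child.info("service started")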
2.703125
3
peframe/modules/apialert.py
ki1556ki/MJUOpenSource
0
11574
# -*- coding: utf-8 -*-
# Import for working with the json format.
import json


# get(): walks the PE import table, collects import names that match an
# alert string, and returns them as a sorted list.
def get(pe, strings_match):
    alerts = strings_match['apialert']
    apialert_found = []
    # Check whether pe has a DIRECTORY_ENTRY_IMPORT attribute: true if
    # present, false otherwise.
    if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'):
        for lib in pe.DIRECTORY_ENTRY_IMPORT:
            for imp in lib.imports:
                for alert in alerts:
                    if alert:  # remove 'null'
                        # If imp.name starts with the alert string, append
                        # imp.name to apialert_found.
                        if str(imp.name).startswith(alert):
                            apialert_found.append(imp.name)
    return sorted(set(apialert_found))
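A hedged sketch of a caller for get() above using the pefile library (the sample path and alert list are illustrative assumptions):

import pefile

pe = pefile.PE("sample.exe")  # illustrative path
strings_match = {'apialert': ['CreateRemoteThread', 'VirtualAlloc']}
for name in get(pe, strings_match):
    print(name)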
2.28125
2
tests/conftest.py
Ninjagod1251/ape
0
11575
import shutil
from pathlib import Path
from tempfile import mkdtemp

import pytest
from click.testing import CliRunner

import ape

# NOTE: Ensure that we don't use local paths for these
ape.config.DATA_FOLDER = Path(mkdtemp()).resolve()
ape.config.PROJECT_FOLDER = Path(mkdtemp()).resolve()


@pytest.fixture(scope="session")
def config():
    yield ape.config


@pytest.fixture(scope="session")
def data_folder(config):
    yield config.DATA_FOLDER


@pytest.fixture(scope="session")
def plugin_manager():
    yield ape.networks.plugin_manager


@pytest.fixture(scope="session")
def accounts():
    yield ape.accounts


@pytest.fixture(scope="session")
def compilers():
    yield ape.compilers


@pytest.fixture(scope="session")
def networks():
    yield ape.networks


@pytest.fixture(scope="session")
def chain():
    yield ape.chain


@pytest.fixture(scope="session")
def project_folder(config):
    yield config.PROJECT_FOLDER


@pytest.fixture(scope="session")
def project(config):
    yield ape.Project(config.PROJECT_FOLDER)


@pytest.fixture
def keyparams():
    # NOTE: password is 'a'
    return {
        "address": "7e5f4552091a69125d5dfcb7b8c2659029395bdf",
        "crypto": {
            "cipher": "aes-128-ctr",
            "cipherparams": {"iv": "7bc492fb5dca4fe80fd47645b2aad0ff"},
            "ciphertext": "43beb65018a35c31494f642ec535315897634b021d7ec5bb8e0e2172387e2812",
            "kdf": "scrypt",
            "kdfparams": {
                "dklen": 32,
                "n": 262144,
                "r": 1,
                "p": 8,
                "salt": "<PASSWORD>",
            },
            "mac": "6a1d520975a031e11fc16cff610f5ae7476bcae4f2f598bc59ccffeae33b1caa",
        },
        "id": "ee<PASSWORD>",
        "version": 3,
    }


@pytest.fixture
def temp_accounts_path(config):
    path = Path(config.DATA_FOLDER) / "accounts"
    path.mkdir(exist_ok=True, parents=True)
    yield path
    if path.exists():
        shutil.rmtree(path)


@pytest.fixture
def runner(project):
    yield CliRunner()
2.078125
2
recipes/Python/576543_Prime_Number_Generator_Checker/recipe-576543.py
tdiprima/code
2023
11576
<reponame>tdiprima/code<gh_stars>1000+
#
# prime number generator
# This program gets two numbers as input
# and prints:
#   the prime numbers in the range,
#   the actual number of primes in the range,
#   and an estimate based on the formula
#
#                  n
#       pi(n) = ------
#               log(n)
#
#   pi(n) = number of primes less than n
#
from math import *

def isPrime(n):
    if n % 2 == 0 and n != 2: return False  # if number is EVEN AND it is NOT 2
    k = n ** 0.5; m = ceil(k)               # if number is a PERFECT SQUARE
    if k == m: return False
    for i in xrange(3, int(m), 2):          # divisibility test, ODDS ONLY
        if n % i == 0: return False
    return True                             # otherwise it is PRIME

if __name__ == '__main__':
    s = input('Enter Start: ')
    e = input('Enter End: ')
    s |= 1  # if s%2==0: s+=1  -- ODDS only
    list = [x for x in range(s, e, 2) if isPrime(x)]
    print list, '\n', len(list), '\n', int(ceil(e/log(e) - s/log(s)))
    # prints list of primes, length of list, estimate using the formula
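The n/log(n) estimate in the header is easy to sanity-check; a quick sketch in the recipe's Python 2 style (the value 100 is illustrative):

# pi(100) is 25; the estimate 100/log(100) ~ 21.7 is in the right ballpark.
from math import log
print int(100 / log(100))  # -> 21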
3.8125
4
Practica 1 E4.py
pardo13/python
0
11577
<gh_stars>0
# Reads two integers and reports which one is greater.
A = int(input("Give me an int: "))
B = int(input("Give me an int: "))
if A > B:
    print("A is greater")
else:
    print("B is greater")
3.65625
4
selenium_driver_updater/_phantomJS.py
Svinokur/selenium_driver_updater
8
11578
#pylint: disable=logging-fstring-interpolation #Standart library imports import shutil import os import time from typing import Tuple from pathlib import Path import re from shutil import copyfile import wget # Local imports from selenium_driver_updater.util.logger import logger from selenium_driver_updater.util.exceptions import DriverVersionInvalidException from selenium_driver_updater.driver_base import DriverBase class PhantomJS(DriverBase): "Class for working with Selenium phantomjs binary" _repo_name = 'ariya/phantomjs' _tmp_folder_path = 'tmp' def __init__(self, **kwargs): kwargs.update(repo_name=PhantomJS._repo_name) DriverBase.__init__(self, **kwargs) self.system_name = '' #assign of specific os specific_system = str(kwargs.get('system_name', '')) specific_system = specific_system.replace('linux64', 'linux-x86_64') specific_system = specific_system.replace('linux32', 'linux-i686').replace('macos', 'macosx') if specific_system: self.system_name = "phantomjs-{}-" + f"{specific_system}" if 'win' in specific_system: self.system_name = "phantomjs-{}-windows" if 'linux' in specific_system: self.system_name = self.system_name + '.tar.bz2' else: self.system_name = self.system_name + '.zip' self.phantomjs_path = self.driver_path def _get_latest_version_phantomjs(self) -> str: """Gets latest phantomjs version Returns: str latest_version (str) : Latest version of phantomjs. """ latest_version : str = '' repo_name = PhantomJS._repo_name latest_version = self.github_viewer.get_latest_release_tag_by_repo_name(repo_name=repo_name) logger.info(f'Latest version of phantomjs: {latest_version}') return latest_version def _compare_current_version_and_latest_version_phantomjs(self) -> Tuple[bool, str, str]: """Compares current version of phantomjs to latest version Returns: Tuple of bool, str and str is_driver_up_to_date (bool) : It true the driver is up to date. Defaults to False. current_version (str) : Current version of the driver. latest_version (str) : Latest version of the driver. """ is_driver_up_to_date : bool = False current_version : str = '' latest_version : str = '' current_version = super()._get_current_version_driver() if not current_version: return is_driver_up_to_date, current_version, latest_version latest_version = self._get_latest_version_phantomjs() if current_version == latest_version: is_driver_up_to_date = True message = ('Your existing phantomjs is up to date.' f'current_version: {current_version} latest_version: {latest_version}') logger.info(message) return is_driver_up_to_date, current_version, latest_version def _check_if_phantomjs_is_up_to_date(self) -> str: """Сhecks for the latest version, downloads or updates phantomjs binary Returns: str driver_path (str) : Path where phantomjs was downloaded or updated. 
""" driver_path : str = '' if self.check_driver_is_up_to_date and not self.system_name: is_driver_up_to_date, current_version, latest_version = self._compare_current_version_and_latest_version_phantomjs() if is_driver_up_to_date: return self.phantomjs_path driver_path = self._download_driver() if self.check_driver_is_up_to_date and not self.system_name: is_driver_up_to_date, current_version, latest_version = self._compare_current_version_and_latest_version_phantomjs() if not is_driver_up_to_date: message = ('Problem with updating phantomjs' f'current_version: {current_version} latest_version: {latest_version}') logger.error(message) message = 'Trying to download previous latest version of phantomjs' logger.info(message) driver_path = self._download_driver(previous_version=True) return driver_path def __rename_driver(self, archive_folder_path : str, archive_driver_path : str) -> None: """Renames phantomjs if it was given Args: archive_folder_path (str) : Path to the main folder archive_driver_path (str) : Path to the phantomjs archive """ renamed_driver_path : str = '' new_path = archive_folder_path + os.path.sep + self.filename if not archive_folder_path.endswith(os.path.sep) else archive_folder_path + self.filename if Path(new_path).exists(): Path(new_path).unlink() os.rename(archive_driver_path, new_path) renamed_driver_path = self.path + self.filename if Path(renamed_driver_path).exists(): Path(renamed_driver_path).unlink() copyfile(new_path, renamed_driver_path) def main(self) -> str: """Main function, checks for the latest version, downloads or updates phantomjs binary or downloads specific version of phantomjs. Returns: str driver_path (str) : Path where phantomjs was downloaded or updated. """ driver_path : str = '' if not self.version: driver_path = self._check_if_phantomjs_is_up_to_date() else: driver_path = self._download_driver(version=self.version) return driver_path def _get_latest_previous_version_phantomjs_via_requests(self) -> str: """Gets previous latest phantomjs version Returns: str latest_version_previous (str) : Latest previous version of phantomjs. """ latest_previous_version : str = '' all_versions = [] url = self.setting["PhantomJS"]["LinkAllReleases"] json_data = self.requests_getter.get_result_by_request(url=url, is_json=True) values = json_data.get('values') for value in values: value_name = value.get('name') if not 'beta' in value_name: find_string = re.findall(self.setting["Program"]["wedriverVersionPattern"], value_name) version = find_string[0] if len(find_string) > 0 else '' all_versions.append(version) all_versions = list(set(all_versions)) all_versions.sort(key=lambda s: list(map(int, s.split('.')))) latest_previous_version = all_versions[len(all_versions)-2] logger.info(f'Latest previous version of phantomjs: {latest_previous_version}') return latest_previous_version def _check_if_version_is_valid(self, url : str) -> None: """Checks the specified version for existence. Args: url (str) : Full download url of chromedriver. """ archive_name : str = url.split("/")[len(url.split("/"))-1] url_releases : str = self.setting["PhantomJS"]["LinkAllReleases"] is_found : bool = False while is_found is False: json_data = self.requests_getter.get_result_by_request(url=url_releases, is_json=True) for data in json_data.get('values'): if data.get('name') == archive_name: is_found = True break url_releases = json_data.get('next') if not url_releases: break if not is_found: message = f'Wrong version or system_name was specified. 
archive_name: {archive_name} url: {url}' raise DriverVersionInvalidException(message) def _download_driver(self, version : str = '', previous_version : bool = False) -> str: """Function to download, delete or upgrade current phantomjs Args: version (str) : Specific phantomjs version to download. Defaults to empty string. previous_version (boll) : If true, phantomjs latest previous version will be downloaded. Defaults to False. Returns: str driver_path (str) : Path to unzipped driver. """ url : str = '' latest_version : str = '' latest_previous_version : str = '' driver_path : str = '' if self.upgrade: super()._delete_current_driver_for_current_os() if version: logger.info(f'Started download phantomjs specific_version: {version}') url = self.setting["PhantomJS"]["LinkLastReleaseFile"].format(version) elif previous_version: latest_previous_version = self._get_latest_previous_version_phantomjs_via_requests() logger.info(f'Started download phantomjs latest_previous_version: {latest_previous_version}') url = self.setting["PhantomJS"]["LinkLastReleaseFile"].format(latest_previous_version) else: latest_version = self._get_latest_version_phantomjs() logger.info(f'Started download phantomjs latest_version: {latest_version}') url = self.setting["PhantomJS"]["LinkLastReleaseFile"].format(latest_version) if self.system_name: url = url.replace(url.split("/")[-1], '') version = [value for key,value in locals().items() if 'version' in key and value][0] url = url + self.system_name.format(version) logger.info(f'Started downloading geckodriver for specific system: {self.system_name}') if any([version, self.system_name ,latest_previous_version]): self._check_if_version_is_valid(url=url) archive_name = url.split("/")[-1] out_path = self.path + archive_name if Path(out_path).exists(): Path(out_path).unlink() logger.info(f'Started download phantomjs by url: {url}') if self.info_messages: archive_path = wget.download(url=url, out=out_path) else: archive_path = wget.download(url=url, out=out_path, bar=None) time.sleep(2) logger.info(f'PhantomJS was downloaded to path: {archive_path}') out_path = self.path parameters = dict(archive_path=archive_path, out_path=out_path) self.extractor.extract_and_detect_archive_format(**parameters) platform : str = self.setting["PhantomJS"]["LastReleasePlatform"] archive_path_folder = self.path + url.split("/")[-1].replace('.zip', '').replace(".tar.bz2", '') + os.path.sep archive_path_folder_bin = archive_path_folder + 'bin' + os.path.sep driver_archive_path = archive_path_folder_bin + platform if not self.filename: copyfile(driver_archive_path, self.path + platform) else: parameters = dict(archive_folder_path=archive_path_folder_bin, archive_driver_path=driver_archive_path) self.__rename_driver(**parameters) if Path(archive_path_folder).exists(): shutil.rmtree(archive_path_folder) driver_path = self.phantomjs_path logger.info(f'PhantomJS was successfully unpacked by path: {driver_path}') if self.chmod: super()._chmod_driver() return driver_path
2.125
2
src/ychaos/settings.py
vanderh0ff/ychaos
8
11579
# Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms

from pathlib import Path
from typing import Optional, Union

from pydantic import BaseModel


class ApplicationSettings(BaseModel):
    """
    Defines the Global Settings that are consistent in both Development & Production scenarios
    """

    APP = "YChaos"
    APP_DESC = "YChaos, The resilience testing framework"
    PROG = "ychaos"

    COMMAND_IDENTIFIER = "_cmd.{}"

    LOG_FILE_PATH: Optional[Path] = None

    @classmethod
    def get_instance(cls):
        return cls()

    @classmethod
    def get_version(cls):
        import pkg_resources

        return pkg_resources.get_distribution(cls.get_instance().PROG).version


class DevSettings(ApplicationSettings):
    """
    Defines the Development settings for YChaos Application.
    """

    CONFIG = "dev"


class ProdSettings(DevSettings):
    """
    Defines the Production settings for YChaos Application

    Prod Settings overrides the Dev Settings class and redefines all the constants
    defined in DevSettings that can be used in the production scenario
    """

    CONFIG = "prod"


class Settings:
    __instance: Optional[Union[DevSettings, ProdSettings]] = None

    @classmethod
    def get_instance(cls) -> Union[DevSettings, ProdSettings]:
        if cls.__instance is None:
            cls(config="prod")
        assert cls.__instance is not None
        return cls.__instance

    def __init__(self, config):
        if config == "dev":
            self.__class__.__instance = DevSettings()
        elif config == "prod":
            self.__class__.__instance = ProdSettings()
        else:
            raise AttributeError("Unknown configuration found")
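A minimal sketch of the singleton accessor above (assumed usage, not from the package):

# Selecting the dev configuration once makes it the process-wide instance.
Settings(config="dev")
settings = Settings.get_instance()
print(settings.CONFIG, settings.PROG)  # -> dev ychaos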
2.1875
2
tests/func/test_pipeline.py
kacmak7/dvc
0
11580
<gh_stars>0
import logging

from dvc.main import main
from tests.basic_env import TestDvc
from tests.func.test_repro import TestRepro
from tests.func.test_repro import TestReproChangedDeepData


class TestPipelineShowSingle(TestDvc):
    def setUp(self):
        super().setUp()
        self.stage = "foo.dvc"
        ret = main(["add", self.FOO])
        self.assertEqual(ret, 0)

    def test(self):
        ret = main(["pipeline", "show", self.stage])
        self.assertEqual(ret, 0)

    def test_commands(self):
        ret = main(["pipeline", "show", self.stage, "--commands"])
        self.assertEqual(ret, 0)

    def test_outs(self):
        ret = main(["pipeline", "show", self.stage, "--outs"])
        self.assertEqual(ret, 0)

    def test_dot(self):
        ret = main(["pipeline", "show", "--dot", self.stage])
        self.assertEqual(ret, 0)

    def test_tree(self):
        ret = main(["pipeline", "show", "--tree", self.stage])
        self.assertEqual(ret, 0)

    def test_ascii_outs(self):
        ret = main(["pipeline", "show", "--ascii", self.stage, "--outs"])
        self.assertEqual(ret, 0)

    def test_dot_commands(self):
        ret = main(["pipeline", "show", "--dot", self.stage, "--commands"])
        self.assertEqual(ret, 0)

    def test_dot_outs(self):
        ret = main(["pipeline", "show", "--dot", self.stage, "--outs"])
        self.assertEqual(ret, 0)

    def test_not_dvc_file(self):
        ret = main(["pipeline", "show", self.FOO])
        self.assertNotEqual(ret, 0)

    def test_non_existing(self):
        ret = main(["pipeline", "show", "non-existing"])
        self.assertNotEqual(ret, 0)


def test_single_ascii(repo_dir, dvc_repo):
    dvc_repo.add(repo_dir.FOO)
    assert main(["pipeline", "show", "--ascii", "foo.dvc"]) == 0


def test_single_ascii_commands(repo_dir, dvc_repo):
    dvc_repo.add(repo_dir.FOO)
    assert main(["pipeline", "show", "--ascii", "foo.dvc", "--commands"]) == 0


class TestPipelineShow(TestRepro):
    def test(self):
        ret = main(["pipeline", "show", self.file1_stage])
        self.assertEqual(ret, 0)

    def test_commands(self):
        ret = main(["pipeline", "show", self.file1_stage, "--commands"])
        self.assertEqual(ret, 0)

    def test_ascii(self):
        ret = main(["pipeline", "show", "--ascii", self.file1_stage])
        self.assertEqual(ret, 0)

    def test_dot(self):
        ret = main(["pipeline", "show", "--dot", self.file1_stage])
        self.assertEqual(ret, 0)

    def test_ascii_commands(self):
        ret = main(
            ["pipeline", "show", "--ascii", self.file1_stage, "--commands"]
        )
        self.assertEqual(ret, 0)

    def test_ascii_outs(self):
        ret = main(["pipeline", "show", "--ascii", self.file1_stage, "--outs"])
        self.assertEqual(ret, 0)

    def test_dot_commands(self):
        ret = main(
            ["pipeline", "show", "--dot", self.file1_stage, "--commands"]
        )
        self.assertEqual(ret, 0)


def test_print_locked_stages(repo_dir, dvc_repo, caplog):
    dvc_repo.add("foo")
    dvc_repo.add("bar")
    dvc_repo.lock_stage("foo.dvc")

    caplog.clear()
    with caplog.at_level(logging.INFO, logger="dvc"):
        assert main(["pipeline", "show", "foo.dvc", "--locked"]) == 0

    assert "foo.dvc" in caplog.text
    assert "bar.dvc" not in caplog.text


def test_dot_outs(repo_dir, dvc_repo):
    dvc_repo.add(repo_dir.FOO)
    dvc_repo.run(
        outs=["file"],
        deps=[repo_dir.FOO, repo_dir.CODE],
        cmd="python {} {} {}".format(repo_dir.CODE, repo_dir.FOO, "file"),
    )
    assert main(["pipeline", "show", "--dot", "file.dvc", "--outs"]) == 0


class TestPipelineShowOuts(TestRepro):
    def setUp(self):
        super().setUp()

    def test_outs(self):
        ret = main(["pipeline", "show", self.file1_stage, "--outs"])
        self.assertEqual(ret, 0)


class TestPipelineShowDeep(TestReproChangedDeepData):
    def test(self):
        ret = main(["pipeline", "show", self.file1_stage])
        self.assertEqual(ret, 0)

    def test_commands(self):
        ret = main(["pipeline", "show", self.file1_stage, "--commands"])
        self.assertEqual(ret, 0)

    def test_outs(self):
        ret = main(["pipeline", "show", self.file1_stage, "--outs"])
        self.assertEqual(ret, 0)

    def test_ascii(self):
        ret = main(["pipeline", "show", "--ascii", self.file1_stage])
        self.assertEqual(ret, 0)

    def test_dot(self):
        ret = main(["pipeline", "show", "--dot", self.file1_stage])
        self.assertEqual(ret, 0)

    def test_ascii_commands(self):
        ret = main(
            ["pipeline", "show", "--ascii", self.file1_stage, "--commands"]
        )
        self.assertEqual(ret, 0)

    def test_ascii_outs(self):
        ret = main(["pipeline", "show", "--ascii", self.file1_stage, "--outs"])
        self.assertEqual(ret, 0)

    def test_dot_commands(self):
        ret = main(
            ["pipeline", "show", "--dot", self.file1_stage, "--commands"]
        )
        self.assertEqual(ret, 0)

    def test_dot_outs(self):
        ret = main(["pipeline", "show", "--dot", self.file1_stage, "--outs"])
        self.assertEqual(ret, 0)


class TestPipelineListEmpty(TestDvc):
    def test(self):
        ret = main(["pipeline", "list"])
        self.assertEqual(ret, 0)


class TestPipelineListSingle(TestPipelineShowDeep):
    def test(self):
        ret = main(["pipeline", "list"])
        self.assertEqual(ret, 0)


class TestDvcRepoPipeline(TestDvc):
    def test_no_stages(self):
        pipelines = self.dvc.pipelines
        self.assertEqual(len(pipelines), 0)

    def one_pipeline(self):
        self.dvc.add("foo")
        self.dvc.run(deps=["foo"], outs=["bar"], cmd="")
        self.dvc.run(deps=["bar"], outs=["baz"], cmd="echo baz > baz")
        pipelines = self.dvc.pipelines

        self.assertEqual(len(pipelines), 1)
        self.assertEqual(pipelines[0].nodes, 3)
        self.assertEqual(pipelines[0].edges, 2)

    def two_pipelines(self):
        self.dvc.add("foo")
        self.dvc.run(deps=["foo"], outs=["bar"], cmd="")
        self.dvc.run(deps=["bar"], outs=["baz"], cmd="echo baz > baz")

        self.dvc.add("code.py")

        pipelines = self.dvc.pipelines
        self.assertEqual(len(pipelines), 2)
        self.assertEqual(pipelines[0].nodes, 3)
        self.assertEqual(pipelines[0].edges, 2)
        self.assertEqual(pipelines[1].nodes, 1)
        self.assertEqual(pipelines[1].edges, 0)

    def locked_stage(self):
        self.dvc.add("foo")
        self.dvc.lock_stage("foo.dvc")
        pipelines = self.dvc.pipelines
        self.assertEqual(len(pipelines), 0)
2.359375
2
my_modes/ChaseLoop.py
mjocean/T2Game
0
11581
<reponame>mjocean/T2Game<filename>my_modes/ChaseLoop.py
import procgame.game
from procgame.game import AdvancedMode
import logging


class ChaseLoop(procgame.game.AdvancedMode):
    """
    Example of T2 "Chase Loop" functionality
    (described in the rules PDF on page J)

    TODO: Sound effects, other visual feedback??
    """

    def __init__(self, game):
        super(ChaseLoop, self).__init__(game=game, priority=30, mode_type=AdvancedMode.Game)

        # useful to set up a custom logger so it's easier to track
        # debugging messages for this mode
        self.logger = logging.getLogger('ChaseLoop')

        # the names of the progress lamps as a list for easier code
        # via indexing, later
        self.chase_lamps = ["twofiftyK", "fivehunK", "sevenfiftyK",
                            "oneMil", "threeMil", "fiveMil"]
        self.collected = 0  # the number the player already has
        self.loop_seq = [False, False, False, False]
        self.awards = [250000, 500000, 750000, 1000000, 3000000, 5000000]  # the list of awards

    def evt_player_added(self, player):
        player.setState('chase_current', 0)

    def evt_ball_starting(self):
        self.cancel_delayed(name="disabler")
        self.mid_switches = [False, False, False]
        self.collected = 0  # progress resets on new ball
        self.sync_lamps_to_progress()
        self.loop_seq = [False, False, False, False]

    def evt_ball_ending(self, (shoot_again, last_ball)):  # Python 2 tuple-parameter syntax
        self.cancel_delayed(name="disabler")

    def debug(self):
        # self.logger.info("escL: %d, escH: %d, clH:%d, clL:%d" % (self.game.switches.escapeL.hw_timestamp, self.game.switches.escapeH.hw_timestamp, self.game.switches.chaseLoopHigh.hw_timestamp, self.game.switches.chaseLoopLow.hw_timestamp))
        self.logger.info("collected = %d" % (self.collected))

    def sw_chaseLoopLow_active(self, sw):
        self.seq_handler(0)

    def sw_chaseLoopHigh_active(self, sw):
        self.seq_handler(1)

    def sw_escapeH_active(self, sw):
        self.seq_handler(2)

    def sw_escapeL_active(self, sw):
        if(self.seq_handler(3)):
            # loop complete
            self.chase_loop_award()
            self.loop_seq = [False, False, False, False]

    def seq_handler(self, num):
        self.cancel_delayed(name="clear_%d" % num)

        # if a previous switch is False, no sequence
        if(False in self.loop_seq[0:num]):
            self.logger.info("saw later switch -- sequence destroyed")
            for i in range(0, num):
                self.reset_switch_memory(i)
            self.loop_seq[num] = False
            self.logger.info("hit %d | Sequence: %s" % (num, self.loop_seq))
            return False

        self.loop_seq[num] = True
        # clear later switches
        for i in range(num + 1, 4):
            self.reset_switch_memory(i)

        self.logger.info("hit %d | Sequence: %s" % (num, self.loop_seq))

        if(num != 3):
            self.delay(name="clear_%d" % num, delay=4.0,
                       handler=self.reset_switch_memory, param=num)
        return True

    def reset_switch_memory(self, switch_num):
        self.cancel_delayed(name="clear_%d" % switch_num)
        if(self.loop_seq[switch_num] == False):
            return  # nothing to do
        self.loop_seq[switch_num] = False
        self.logger.info("RESET %d | Sequence: %s" % (switch_num, self.loop_seq))

    def OFF_sw_escapeL_active(self, sw):
        self.debug()
        if(self.game.switches.chaseLoopLow.hw_timestamp == None):
            return procgame.game.SwitchContinue
        if (((self.game.switches.escapeL.hw_timestamp - self.game.switches.chaseLoopLow.hw_timestamp) < 2000) and
                (self.game.switches.escapeL.hw_timestamp > self.game.switches.escapeH.hw_timestamp) and
                (self.game.switches.escapeH.hw_timestamp > self.game.switches.chaseLoopHigh.hw_timestamp) and
                (self.game.switches.chaseLoopHigh.hw_timestamp > self.game.switches.chaseLoopLow.hw_timestamp)):
            self.chase_loop_award()
            return procgame.game.SwitchStop
        else:
            return procgame.game.SwitchContinue

    def chase_loop_award(self):
        self.sync_lamps_to_progress(special=self.collected)
        self.game.displayText("Chase Loop " + str(self.awards[self.collected]))
        self.game.score(self.awards[self.collected])
        if(self.collected < len(self.chase_lamps) - 1):
            self.collected += 1
        else:
            # already got them all
            pass
        self.debug()
        self.delay(name="lamp_sync", delay=1.0, handler=self.sync_lamps_to_progress)

    def disable_progress_lamps(self):
        for l in self.chase_lamps:
            self.game.lamps[l].disable()

    def set_lamp(self, lamp_name, state):
        l = self.game.lamps[lamp_name]
        if(state == 0):
            l.disable()
        elif(state == 1):
            l.enable()
        elif(state == 2):
            l.schedule(0xff00ff00)
        elif(state == 3):
            l.schedule(0xf0f0f0f0)

    def sync_lamps_to_progress(self, special=None):
        self.cancel_delayed(name="lamp_sync")
        for i in range(0, len(self.chase_lamps)):
            l_state = 0
            if(special is not None and i == special):
                l_state = 3
            elif(self.collected > i):
                l_state = 1
            elif(self.collected == i):
                l_state = 2
            self.logger.info("setting " + self.chase_lamps[i] + " to " + str(l_state))
            self.set_lamp(self.chase_lamps[i], l_state)
2.5625
3
app/dists/admin.py
ariashahverdi/Backend
0
11582
from django.contrib import admin

from .models import Distribution

admin.site.register(Distribution)

# Register your models here.
1.234375
1
examples_ltnw/binary_classifier.py
gilbeckers/logictensornetworks
0
11583
<filename>examples_ltnw/binary_classifier.py
# -*- coding: utf-8 -*-
import logging; logging.basicConfig(level=logging.INFO)

import numpy as np
import matplotlib.pyplot as plt

import logictensornetworks_wrapper as ltnw

nr_samples = 500

data = np.random.uniform([0, 0], [1., 1.], (nr_samples, 2)).astype(np.float32)
data_A = data[np.where(np.sum(np.square(data - [.5, .5]), axis=1) < .09)]
data_not_A = data[np.where(np.sum(np.square(data - [.5, .5]), axis=1) >= .09)]

ltnw.variable("?data_A", data_A)
ltnw.variable("?data_not_A", data_not_A)
ltnw.variable("?data", data)

ltnw.predicate("A", 2)

ltnw.axiom("forall ?data_A: A(?data_A)")
ltnw.axiom("forall ?data_not_A: ~A(?data_not_A)")

ltnw.initialize_knowledgebase(initial_sat_level_threshold=.1)
sat_level = ltnw.train(track_sat_levels=1000, sat_level_epsilon=.99)

plt.figure(figsize=(12, 8))

result = ltnw.ask("A(?data)")
plt.subplot(2, 2, 1)
plt.scatter(data[:, 0], data[:, 1], c=result.squeeze())
plt.colorbar()
plt.title("A(x) - training data")

result = ltnw.ask("~A(?data)")
plt.subplot(2, 2, 2)
plt.scatter(data[:, 0], data[:, 1], c=result.squeeze())
plt.colorbar()
plt.title("~A(x) - training data")

data_test = np.random.uniform([0, 0], [1., 1.], (500, 2)).astype(np.float32)
ltnw.variable("?data_test", data_test)

result = ltnw.ask("A(?data_test)")
plt.subplot(2, 2, 3)
plt.scatter(data_test[:, 0], data_test[:, 1], c=result.squeeze())
plt.colorbar()
plt.title("A(x) - test data")

result = ltnw.ask("~A(?data_test)")
plt.subplot(2, 2, 4)
plt.scatter(data_test[:, 0], data_test[:, 1], c=result.squeeze())
plt.title("~A(x) - test data")

plt.show()

ltnw.constant("a", [0.25, .5])
ltnw.constant("b", [1., 1.])
print("a is in A: %s" % ltnw.ask("A(a)"))
print("b is in A: %s" % ltnw.ask("A(b)"))
2.75
3
leetcode/0015_3Sum/result.py
theck17/notes
0
11584
<reponame>theck17/notes
#!/usr/bin/env python3
# Author: C.K
# Email: <EMAIL>
# DateTime:2021-03-15 00:07:14
# Description:

from typing import List  # needed for the annotations below; missing in the original


class Solution:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        result = set()
        for i in range(0, len(nums) - 1):
            # Reduce the problem to two-sum: find two later numbers summing to -nums[i]
            two_sum = -nums[i]
            cache = set()
            for num in nums[i + 1:]:
                remaining = two_sum - num
                if remaining in cache:
                    # sort to create a unique tuple; adding tuples to a set
                    # eliminates duplicate combinations
                    triplet = tuple(sorted([nums[i], remaining, num]))
                    result.add(triplet)
                else:
                    cache.add(num)
        # the signature promises List[List[int]], so convert the set of tuples
        return [list(t) for t in result]


if __name__ == "__main__":
    pass
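# --- Illustrative usage (editor's addition, not part of the original solution). ---
# Exercising threeSum on the classic LeetCode example; the two expected
# triplets are [-1, -1, 2] and [-1, 0, 1] (order of results may vary).
if __name__ == "__main__":
    print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))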
3.828125
4
customer/views.py
lautarianoo/django_shop
0
11585
from django.contrib.auth import authenticate, login
from django.shortcuts import render, redirect
from cart.models import Cart
from django.views import View
from .forms import LoginForm, RegistrationForm, CreateCompanyForm
from customer.models import Customer, ShippingAddress
from src.utils.mixins import CustomerMixin
from checkout.models import ApplyOrganization


class LoginView(CustomerMixin, View):

    def get(self, request, *args, **kwargs):
        if request.user.is_authenticated:
            return redirect('catalog')
        form = LoginForm()
        return render(request, 'customer/login.html', {'form': form})

    def post(self, request, *args, **kwargs):
        form = LoginForm(request.POST or None)
        if form.is_valid():
            email = form.cleaned_data['email']
            password = form.cleaned_data['password']
            user = authenticate(request, email=email, password=password)
            if user:
                login(request, user)
                return redirect('catalog')
        return render(request, 'customer/login.html', {'form': form})


class RegistrationView(View):

    def get(self, request, *args, **kwargs):
        if request.user.is_authenticated:
            return redirect('catalog')
        form = RegistrationForm()
        return render(request, 'customer/register.html', {'form': form})

    def post(self, request, *args, **kwargs):
        form = RegistrationForm(request.POST or None, request.FILES or None)
        if form.is_valid():
            new_user = form.save(commit=False)
            customer = Customer.objects.create(user=new_user, status="Unrecognized")
            customer.save()
            cart = Cart.objects.create(customer=customer)
            cart.save()
            address = ShippingAddress.objects.create(customer=customer)
            address.save()
            new_user.set_password(form.cleaned_data['<PASSWORD>'])  # field name redacted upstream
            new_user.save()
            return redirect('login')
        return render(request, 'customer/register.html', {'form': form})


class CreateCompany(View):

    def get(self, request, *args, **kwargs):
        if request.user.is_authenticated and request.user.STATUS_AUTH == "Recognized":
            form = CreateCompanyForm()
            return render(request, 'customer/create_company.html', {'form': form})
        return redirect('catalog')

    def post(self, request, *args, **kwargs):
        if request.user.is_authenticated and request.user.STATUS_AUTH == "Recognized":
            form = CreateCompanyForm(request.POST or None, request.FILES or None)
            if form.is_valid():
                new_company = form.save(commit=False)
                new_company.STATUS_COMPANY = "No verify"
                new_company.user = request.user
                new_company.save()
                return redirect('catalog')
            return render(request, 'customer/register.html', {'form': form})
        # fixed: in the original, `form` was unbound on this path
        return redirect('catalog')
2.171875
2
kickstarter/app.py
Annapurnaj91/kickstarter3
0
11586
from flask import Flask, render_template, request
# from .recommendation import *
# import pickle
import pandas as pd
import numpy as np
# import keras
# from keras.models import load_model
import pickle


def create_app():
    # initializes our app
    APP = Flask(__name__)

    @APP.route('/')
    def form():
        return render_template('base.html')

    @APP.route('/data/', methods=['GET', 'POST'])
    def data():
        if request.method == 'POST':
            # Get form data
            name = request.form.get('name')
            blurb = request.form.get('blurb', 'default')
            country = request.form.get('country', 'default')
            backers_count = request.form.get('backers_count', 'default')

            prediction = preprocessDataAndPredict(name, blurb, country, backers_count)
            # print(prediction[0])
            return render_template('data.html', prediction=prediction[0])

    def preprocessDataAndPredict(name, blurb, country, backers_count):
        # test_data = (blurb)
        test_data = (name, blurb, country, backers_count)
        # print(test_data)
        test_data = np.array(test_data)
        dftest = pd.DataFrame(test_data).T
        dftest.columns = ['name', 'blurb', 'country', 'backers_count']
        print(dftest)
        print(dftest.shape)
        # test_data = test_data.reshape(1, -1)
        # print(test_data)

        # file = open("model.pkl", "wb")
        model = pickle.load(open('model_knn', 'rb'))
        # model = pickle.load(
        #     open('Kickstarter2/kickstarter/kick_model(1)', 'rb'))
        prediction = model.predict(dftest)
        # print(prediction)
        return prediction

    return APP
2.75
3
src/apiron/service/discoverable.py
tushar-deepsource/apiron
109
11587
from typing import List, Type

from apiron.service.base import ServiceBase


class DiscoverableService(ServiceBase):
    """
    A Service whose hosts are determined via a host resolver.
    A host resolver is any class with a :func:`resolve` method
    that takes a service name as its sole argument
    and returns a list of host names that correspond to that service.
    """

    host_resolver_class: Type
    service_name: str

    @classmethod
    def get_hosts(cls) -> List[str]:
        return cls.host_resolver_class.resolve(cls.service_name)

    def __str__(self) -> str:
        return self.service_name

    def __repr__(self) -> str:
        klass = self.__class__
        return "{klass}(service_name={service_name}, host_resolver={host_resolver})".format(
            klass=klass.__name__,
            service_name=klass.service_name,
            host_resolver=klass.host_resolver_class.__name__,
        )
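# --- Illustrative sketch (editor's addition, not part of the original module). ---
# A hypothetical resolver and service showing the contract described in the
# docstring above: any class exposing resolve(service_name) -> list of hosts.
# The resolver name, service name, and host list are all made up.
if __name__ == "__main__":
    class StaticResolver:
        _hosts = {"demo-service": ["demo-1.example.com", "demo-2.example.com"]}

        @classmethod
        def resolve(cls, service_name: str) -> List[str]:
            return cls._hosts.get(service_name, [])

    class DemoService(DiscoverableService):
        service_name = "demo-service"
        host_resolver_class = StaticResolver

    print(DemoService.get_hosts())  # -> ['demo-1.example.com', 'demo-2.example.com']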
2.96875
3
src/plotman/plot_util.py
rafaelsteil/plotman
0
11588
<filename>src/plotman/plot_util.py
import math
import os
import re
import shutil

from plotman import job

GB = 1_000_000_000


def df_b(d):
    'Return free space for directory (in bytes)'
    usage = shutil.disk_usage(d)
    return usage.free


def get_k32_plotsize():
    return 108 * GB


def is_valid_plot_dst(d, sched_cfg, all_jobs):
    if sched_cfg.stop_when_dst_full:
        space = df_b(d)
        # Subtract space for current jobs which will be moved to the dir.
        # Note: this underestimates the free space available when a
        # job is in phase 4, since the plot is partially moved to dst;
        # once phase 4 is complete a new plot will eventually kick off
        jobs_to_dstdir = job.job_phases_for_dstdir(d, all_jobs)
        space -= len(jobs_to_dstdir) * get_k32_plotsize()
        return enough_space_for_k32(space)
    return True


def enough_space_for_k32(b):
    'Determine if there is enough space for a k32 given a number of free bytes'
    return b > 1.2 * get_k32_plotsize()


def human_format(num, precision):
    magnitude = 0
    while abs(num) >= 1000:
        magnitude += 1
        num /= 1000.0
    return (('%.' + str(precision) + 'f%s') %
            (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude]))


def time_format(sec):
    if sec is None:
        return '-'
    if sec < 60:
        return '%ds' % sec
    else:
        return '%d:%02d' % (int(sec / 3600), int((sec % 3600) / 60))


def tmpdir_phases_str(tmpdir_phases_pair):
    tmpdir = tmpdir_phases_pair[0]
    phases = tmpdir_phases_pair[1]
    phase_str = ', '.join(['%d:%d' % ph_subph for ph_subph in sorted(phases)])
    return ('%s (%s)' % (tmpdir, phase_str))


def split_path_prefix(items):
    if not items:
        return ('', [])

    prefix = os.path.commonpath(items)
    if prefix == '/':
        return ('', items)
    else:
        remainders = [os.path.relpath(i, prefix) for i in items]
        return (prefix, remainders)


def list_k32_plots(d):
    'List completed k32 plots in a directory (not recursive)'
    plots = []
    for plot in os.listdir(d):
        if re.match(r'^plot-k32-.*plot$', plot):
            plot = os.path.join(d, plot)
            try:
                if os.stat(plot).st_size > (0.95 * get_k32_plotsize()):
                    plots.append(plot)
            except FileNotFoundError:
                continue
    return plots


def column_wrap(items, n_cols, filler=None):
    '''Take items, distribute among n_cols columns, and return a set
    of rows containing the slices of those columns.'''
    rows = []
    n_rows = math.ceil(len(items) / n_cols)
    for row in range(n_rows):
        row_items = items[row::n_rows]
        # Pad and truncate
        rows.append((row_items + ([filler] * n_cols))[:n_cols])
    return rows
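# --- Illustrative checks (editor's addition, not part of the original module). ---
# Quick sanity checks for the formatting helpers above.
if __name__ == "__main__":
    print(human_format(1234567, 2))         # -> 1.23M
    print(time_format(59))                  # -> 59s
    print(time_format(3725))                # -> 1:02
    print(column_wrap([1, 2, 3, 4, 5], 2))  # -> [[1, 4], [2, 5], [3, None]]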
2.609375
3
src/config.py
forkedbranch/femm-opt
1
11589
<filename>src/config.py
# (c) Copyright 2016 forkedbranch (http://forkedbranch.eu/)
# Licensed under the Apache License, Version 2.0

import configparser

config = configparser.ConfigParser()
config.read('config.ini')


def get_input_folder():
    return config['DEFAULT']['InputFolder']


def get_output_folder():
    # NOTE: the key is misspelled ("Forlder") and must match the entry in config.ini
    return config['DEFAULT']['OutputForlder']


def get_femm_exe():
    return config['DEFAULT']['FemmExe']


def get_ffmpeg_exe():
    return config['DEFAULT']['FfmpegExe']


def get_femm_scr_templ():
    return config['DEFAULT']['FemmScrTempl']


def get_femm_scr_lib():
    return config['DEFAULT']['FemmScrLib']
1.726563
2
vespene/workers/registration.py
Conan-Kudo/vespene
680
11590
# Copyright 2018, <NAME> LLC
# License: Apache License Version 2.0
# -------------------------------------------------------------------------
# registration.py - updates the database to say who is building something
# and what the current settings are, which is used by the file serving
# code to see if it is ok to serve up files in the buildroot. But also
# for record keeping.
# --------------------------------------------------------------------------

from datetime import datetime
import random
import fcntl
import subprocess
import os

from django.utils import timezone
from django.conf import settings

from vespene.common.logger import Logger
from vespene.models.worker import Worker

LOG = Logger()

WORKER_ID_FILE = "/etc/vespene/worker_id"

# =============================================================================

class RegistrationManager(object):

    def __init__(self, builder, build):
        self.builder = builder
        self.build = build
        self.project = self.build.project

    def create_worker_id(self):
        wid = ''.join(random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50))
        fd = open(WORKER_ID_FILE, "w+")
        fd.write(wid)
        fd.close()
        return wid

    def get_worker_id(self, fd):
        return fd.readlines()[0].strip()

    def get_worker_record(self, worker_id):
        qs = Worker.objects.filter(worker_uid=worker_id)
        if not qs.exists():
            return None
        return qs.first()

    # Relevant fields on the Worker model, for reference:
    # worker_pool = models.ForeignKey('WorkerPool', null=False, on_delete=models.SET_NULL)
    # hostname = models.CharField(max_length=1024, null=True)
    # port = models.IntField(null=False, default=8080)
    # working_dir = models.CharField(max_length=1024, null=True)
    # first_checkin = models.DateTimeField(null=True, blank=True)
    # last_checkin = models.DateTimeField(null=True, blank=True)
    # fileserving_enabled = models.BooleanField(null=False, default=False)

    def get_hostname(self):
        if settings.FILESERVING_HOSTNAME:
            return settings.FILESERVING_HOSTNAME
        return self.guess_hostname()

    def guess_hostname(self):
        return subprocess.check_output("hostname").decode('utf-8').strip()

    def get_port(self):
        if settings.FILESERVING_PORT:
            return settings.FILESERVING_PORT
        else:
            return 8000

    def get_build_root(self):
        return settings.BUILD_ROOT

    def get_fileserving_enabled(self):
        return settings.FILESERVING_ENABLED

    def create_worker_record(self, worker_id):
        now = datetime.now(tz=timezone.utc)
        obj = Worker(
            worker_uid = worker_id,
            hostname = self.get_hostname(),
            port = self.get_port(),
            build_root = self.get_build_root(),
            first_checkin = now,
            last_checkin = now,
            fileserving_enabled = self.get_fileserving_enabled()
        )
        obj.save()
        return obj

    def update_worker_record(self, worker):
        now = datetime.now(tz=timezone.utc)
        worker.hostname = self.get_hostname()
        worker.port = self.get_port()
        worker.build_root = self.get_build_root()
        worker.last_checkin = now
        worker.fileserving_enabled = self.get_fileserving_enabled()
        worker.save()
        return worker

    def go(self):
        """
        Creates or updates the worker record for this host under a file lock
        and attaches it to the current build.
        """
        if not os.path.exists(WORKER_ID_FILE):
            worker_id = self.create_worker_id()
        fd = open(WORKER_ID_FILE, "r")
        fcntl.flock(fd, fcntl.LOCK_EX)
        worker_id = self.get_worker_id(fd)
        worker_record = self.get_worker_record(worker_id)
        if not worker_record:
            worker_record = self.create_worker_record(worker_id)
        else:
            worker_record = self.update_worker_record(worker_record)
        self.build.worker = worker_record
        self.build.save()
        fcntl.flock(fd, fcntl.LOCK_UN)
2.046875
2
tests/test_db_mathes_ui.py
AlexRovan/Python_training
0
11591
<filename>tests/test_db_mathes_ui.py
from model.group import Group
from model.contact import Contact


def test_group_list(app, db):
    ui_group = app.group.get_groups_list()

    def clean(group):
        return Group(id=group.id, name=group.name.strip())

    bd_group = map(clean, db.get_group_list())
    assert sorted(ui_group, key=Group.id_or_max) == sorted(bd_group, key=Group.id_or_max)


def test_contact_list(app, db):
    ui_contact = app.contact.get_contacts_list()

    def clean(contact):
        return Contact(
            id=contact.id,
            firstname=''.join(contact.firstname.split(' ')),
            lastname=''.join(contact.lastname.split(' ')),
        )

    bd_contact = map(clean, db.get_contact_list())
    assert sorted(ui_contact, key=Contact.id_or_max) == sorted(bd_contact, key=Contact.id_or_max)
2.5
2
10 Days of Statistics/Day 5 - Normal Distribution I.py
sohammanjrekar/HackerRank
0
11592
<reponame>sohammanjrekar/HackerRank<filename>10 Days of Statistics/Day 5 - Normal Distribution I.py
"""
Day 5: Normal Distribution I

In a certain plant, the time taken to assemble a car is a random variable, X,
having a normal distribution with a mean of 20 hours and a standard deviation
of 2 hours. What is the probability that a car can be assembled at this plant in:
1. Less than 19.5 hours?
2. Between 20 and 22 hours?

Author: <NAME>
"""
import math


# probability of less than `less` hours
def cumulative1(mean, std, less):
    print(round(0.5 * (1 + math.erf((less - mean) / (std * (2 ** 0.5)))), 3))


# probability of between `lower_range` and `upper_range` hours
def cumulative2(mean, std, lower_range, upper_range):
    print(round(
        0.5 * (1 + math.erf((upper_range - mean) / (std * (2 ** 0.5))))
        - 0.5 * (1 + math.erf((lower_range - mean) / (std * (2 ** 0.5)))), 3))


values = list(map(float, input().split()))
mean = values[0]
std = values[1]
less = float(input())
boundaries = list(map(float, input().split()))
lower_range = boundaries[0]
upper_range = boundaries[1]

cumulative1(mean, std, less)
cumulative2(mean, std, lower_range, upper_range)
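# --- Worked check (editor's addition, kept in comments because the script
# above reads its inputs from stdin). With mean 20 and std 2:
#     cumulative1(20, 2, 19.5)    # prints 0.401  (P(X < 19.5), z = -0.25)
#     cumulative2(20, 2, 20, 22)  # prints 0.341  (P(20 < X < 22) = Phi(1) - Phi(0))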
3.578125
4
11024/11024.py3.py
isac322/BOJ
14
11593
for _ in range(int(input())): print(sum(map(int, input().split())))
2.9375
3
tests/test_contact_form.py
LaudateCorpus1/apostello
69
11594
import pytest

from apostello import models


@pytest.mark.slow
@pytest.mark.django_db
class TestContactForm:
    """Test the sending of SMS."""

    def test_number_permissions_staff_exception(self, recipients, users):
        """Test sending a message now."""
        calvin = recipients["calvin"]
        # check good post:
        prof = users["staff"].profile
        prof.can_see_contact_nums = False
        prof.save()
        r = users["c_staff"].post(
            f"/api/v2/recipients/{calvin.pk}/",
            {
                "pk": calvin.pk,
                "first_name": calvin.first_name,
                "last_name": calvin.last_name,
                "number": "+447900000000",
                "do_not_reply": calvin.do_not_reply,
            },
        )
        assert r.status_code == 200
        calvin.refresh_from_db()
        assert calvin.number == "+447900000000"

    def test_number_permissions_no_perm(self, recipients, users):
        calvin = recipients["calvin"]
        r = users["c_in"].post(
            f"/api/v2/recipients/{calvin.pk}/",
            {
                "pk": calvin.pk,
                "first_name": calvin.first_name,
                "last_name": calvin.last_name,
                "number": "+447900000000",
                "do_not_reply": calvin.do_not_reply,
            },
        )
        assert r.status_code == 400
        assert "You do not have permission to change the number field." in r.json()["errors"]["__all__"]

    def test_number_permissions_with_perm(self, recipients, users):
        calvin = recipients["calvin"]
        # check good post:
        prof = users["notstaff2"].profile
        prof.can_see_contact_nums = True
        prof.save()
        r = users["c_in"].post(
            f"/api/v2/recipients/{calvin.pk}/",
            {
                "pk": calvin.pk,
                "first_name": calvin.first_name,
                "last_name": calvin.last_name,
                "number": "+447900000001",
                "do_not_reply": calvin.do_not_reply,
            },
        )
        assert r.status_code == 200
        calvin.refresh_from_db()
        assert calvin.number == "+447900000001"

    def test_notes_permissions_staff_exception(self, recipients, users):
        """Test sending a message now."""
        calvin = recipients["calvin"]
        # check good post:
        prof = users["staff"].profile
        prof.can_see_contact_notes = False
        prof.save()
        r = users["c_staff"].post(
            f"/api/v2/recipients/{calvin.pk}/",
            {
                "pk": calvin.pk,
                "first_name": calvin.first_name,
                "last_name": calvin.last_name,
                "number": calvin.number,
                "do_not_reply": calvin.do_not_reply,
                "notes": "hi there",
            },
        )
        assert r.status_code == 200
        calvin.refresh_from_db()
        assert calvin.notes == "hi there"

    def test_notes_permissions_no_perm(self, recipients, users):
        calvin = recipients["calvin"]
        r = users["c_in"].post(
            f"/api/v2/recipients/{calvin.pk}/",
            {
                "pk": calvin.pk,
                "first_name": calvin.first_name,
                "last_name": calvin.last_name,
                "do_not_reply": calvin.do_not_reply,
                "notes": "hi there",
            },
        )
        assert r.status_code == 400
        assert "You do not have permission to change the notes field." in r.json()["errors"]["__all__"]
        calvin.refresh_from_db()
        assert not (calvin.notes == "hi there")

    def test_notes_permissions_with_perm(self, recipients, users):
        calvin = recipients["calvin"]
        # check good post:
        prof = users["notstaff2"].profile
        prof.can_see_contact_notes = True
        prof.save()
        r = users["c_in"].post(
            f"/api/v2/recipients/{calvin.pk}/",
            {
                "pk": calvin.pk,
                "first_name": calvin.first_name,
                "last_name": calvin.last_name,
                "do_not_reply": calvin.do_not_reply,
                "notes": "something something",
            },
        )
        assert r.status_code == 200
        calvin.refresh_from_db()
        assert calvin.notes == "something something"
2.296875
2
tests/unit/records/format_hints.py
cwegrzyn/records-mover
36
11595
<reponame>cwegrzyn/records-mover
bluelabs_format_hints = {
    'field-delimiter': ',',
    'record-terminator': "\n",
    'compression': 'GZIP',
    'quoting': None,
    'quotechar': '"',
    'doublequote': False,
    'escape': '\\',
    'encoding': 'UTF8',
    'dateformat': 'YYYY-MM-DD',
    'timeonlyformat': 'HH24:MI:SS',
    'datetimeformattz': 'YYYY-MM-DD HH:MI:SSOF',
    'datetimeformat': 'YYYY-MM-DD HH24:MI:SS',
    'header-row': False,
}

csv_format_hints = {
    'field-delimiter': ',',
    'record-terminator': "\n",
    'compression': 'GZIP',
    'quoting': 'minimal',
    'quotechar': '"',
    'doublequote': True,
    'escape': None,
    'encoding': 'UTF8',
    'dateformat': 'MM/DD/YY',
    'timeonlyformat': 'HH24:MI:SS',
    'datetimeformattz': 'MM/DD/YY HH24:MI',
    'datetimeformat': 'MM/DD/YY HH24:MI',
    'header-row': True,
}

vertica_format_hints = {
    'field-delimiter': '\001',
    'record-terminator': '\002',
    'compression': None,
    'quoting': None,
    'quotechar': '"',
    'doublequote': False,
    'escape': None,
    'encoding': 'UTF8',
    'dateformat': 'YYYY-MM-DD',
    'timeonlyformat': 'HH24:MI:SS',
    'datetimeformat': 'YYYY-MM-DD HH:MI:SS',
    'datetimeformattz': 'YYYY-MM-DD HH:MI:SSOF',
    'header-row': False,
}
1.75
2
CONST.py
Bobobert/DQN-Vanilla
0
11596
# FROM THE OP PAPER-ISH
MINI_BATCH_SIZE = 32
MEMORY_SIZE = 10**6
BUFFER_SIZE = 100
LHIST = 4
GAMMA = 0.99
UPDATE_FREQ_ONlINE = 4
UPDATE_TARGET = 2500  # This was 10**4 in the paper, but it is measured in actor steps, so it is divided by UPDATE_FREQ_ONlINE
TEST_FREQ = 5*10**4  # Measured in updates
TEST_STEPS = 10**4
LEARNING_RATE = 0.00025
G_MOMENTUM = 0.95
EPSILON_INIT = 1.0
EPSILON_FINAL = 0.1
EPSILON_TEST = 0.05
EPSILON_LIFE = 10**6
REPLAY_START = 5*10**4
NO_OP_MAX = 30
UPDATES = 5*10**6
CLIP_REWARD = 1.0
CLIP_ERROR = 1.0

# MISC
PLAY_STEPS = 3000
BUFFER_SAMPLES = 20
CROP = (0, -1)
FRAMESIZE = [84, 84]
FRAMESIZETP = (84, 84)
# DROPS = [0.0, 0.15, 0.1, 0.0]
DROPS = [0.0, 0.0, 0.0, 0.0]

Games = ['air_raid', 'alien', 'amidar', 'assault', 'asterix',
         'asteroids', 'atlantis', 'bank_heist', 'battle_zone', 'beam_rider',
         'bowling', 'boxing', 'breakout', 'carnival', 'centipede',
         'chopper_command', 'crazy_climber', 'demon_attack', 'double_dunk', 'enduro',
         'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
         'hero', 'ice_hockey', 'jamesbond', 'kangaroo', 'krull',
         'kung_fu_master', 'montezuma_revenge', 'ms_pacman', 'name_this_game', 'pong',
         'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank',
         'seaquest', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot',
         'tutankham', 'up_n_down', 'venture', 'video_pinball', 'wizard_of_wor',
         'zaxxon']

GamesExtras = ['defender', 'phoenix', 'berzerk', 'skiing', 'yars_revenge', 'solaris', 'pitfall']

ACTION_MEANING = {
    0: "NOOP",
    1: "FIRE",
    2: "UP",
    3: "RIGHT",
    4: "LEFT",
    5: "DOWN",
    6: "UPRIGHT",
    7: "UPLEFT",
    8: "DOWNRIGHT",
    9: "DOWNLEFT",
    10: "UPFIRE",
    11: "RIGHTFIRE",
    12: "LEFTFIRE",
    13: "DOWNFIRE",
    14: "UPRIGHTFIRE",
    15: "UPLEFTFIRE",
    16: "DOWNRIGHTFIRE",
    17: "DOWNLEFTFIRE",
}
1.335938
1
milieu/paper/methods/milieu.py
seyuboglu/milieu
1
11597
import os
import json
import logging
from collections import defaultdict

import numpy as np
import networkx as nx
import parse  # needed by load_method below; missing from the original imports
import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from tqdm import tqdm

from milieu.util.util import place_on_cpu, place_on_gpu
from milieu.paper.methods.method import DPPMethod

# NOTE: LCIModule, LCIEmbModule, DiseaseDataset, train_and_evaluate, bce_loss
# and metrics are referenced below but are not imported in this file as published.


class MilieuMethod(DPPMethod):
    """ GCN method class
    """

    def __init__(self, network, diseases, params):
        super().__init__(network, diseases, params)

        self.dir = params["dir"]
        self.adjacency = self.network.adj_matrix
        self.diseases = diseases
        self.params = params
        print(self.params)
        if self.params.get("load", False):
            self.load_method()
        else:
            self.train_method(diseases)

        self.curr_fold = None

    def load_method(self):
        """
        """
        logging.info("Loading Params...")
        with open(os.path.join(self.dir, "params.json")) as f:
            params = json.load(f)["process_params"]["method_params"]
        params.update(self.params)
        self.params = params

        logging.info("Loading Models...")
        self.folds_to_models = {}
        for model_file in os.listdir(os.path.join(self.dir, "models")):
            split = parse.parse("model_{}.tar", model_file)[0]
            self.folds_to_models[split] = os.path.join(self.dir, "models", model_file)

    def train_method(self, diseases):
        """
        """
        logging.info("Training Models...")
        folds_to_diseases = defaultdict(set)
        for disease in diseases.values():
            if disease.split == "none":
                continue
            folds_to_diseases[disease.split].add(disease)

        self.folds_to_models = {}
        if not (os.path.exists(os.path.join(self.dir, "models"))):
            os.mkdir(os.path.join(self.dir, "models"))

        for test_fold in folds_to_diseases.keys():
            logging.info("Training model for test {}".format(test_fold))
            val_fold = str((int(test_fold) - 1) % len(folds_to_diseases))
            test_dataset = DiseaseDataset(
                [disease for disease in folds_to_diseases[test_fold]],
                self.network)
            val_dataset = DiseaseDataset(
                [disease for disease in folds_to_diseases[val_fold]],
                self.network)
            train_dataset = DiseaseDataset(
                [disease
                 for fold, diseases in folds_to_diseases.items()
                 if fold != test_fold and fold != val_fold
                 for disease in diseases],
                self.network)

            # ensure no data leakage
            assert(not set.intersection(*[test_dataset.get_ids(), train_dataset.get_ids()]))
            assert(not set.intersection(*[val_dataset.get_ids(), train_dataset.get_ids()]))

            model = self.train_model(train_dataset, val_dataset)
            path = os.path.join(self.dir, "models/model_{}.tar".format(test_fold))
            torch.save(model.state_dict(), path)
            self.folds_to_models[test_fold] = path

    def train_model(self, train_dataset, val_dataset):
        """ Trains the underlying model
        """
        train_dl = DataLoader(train_dataset,
                              batch_size=self.params["batch_size"],
                              shuffle=True,
                              num_workers=self.params["num_workers"],
                              pin_memory=self.params["cuda"])

        dev_dl = DataLoader(val_dataset,
                            batch_size=self.params["batch_size"],
                            shuffle=True,
                            num_workers=self.params["num_workers"],
                            pin_memory=self.params["cuda"])

        if self.params["model_class"] == "LCIEmbModule":
            model = LCIEmbModule(self.params["model_args"], self.network)
        else:
            model = LCIModule(self.params, self.adjacency)

        if self.params["cuda"]:
            model = model.cuda()

        optimizer = Adam(model.parameters(),
                         lr=self.params["learning_rate"],
                         weight_decay=self.params["weight_decay"])

        logging.info("Starting training for {} epoch(s)".format(self.params["num_epochs"]))
        model.train()
        train_and_evaluate(
            model,
            train_dl,
            dev_dl,
            optimizer,
            bce_loss,
            metrics,
            self.params,
            self.dir
        )
        model.eval()
        return model.cpu()

    def compute_scores(self, train_pos, disease):
        """ Compute the scores predicted by GCN.

        Args:
            train_pos: indices of the known positive (training) nodes
            disease: the disease whose fold selects the trained model
        """
        val_pos = None

        # Adjacency: get sparse representation of ppi_adj
        N, _ = self.adjacency.shape
        X = torch.zeros(1, N)
        X[0, train_pos] = 1
        if self.params["cuda"]:
            X = X.cuda()

        # lazily (re)load the model for this disease's fold
        if disease.split != self.curr_fold:
            if self.params["model_class"] == "LCIEmbModule":
                model = LCIEmbModule(self.params["model_args"], self.network)
            else:
                model = LCIModule(self.params, self.adjacency)
            model.load_state_dict(torch.load(self.folds_to_models[disease.split]))
            model.eval()
            model.cuda()
            self.curr_model = model
            self.curr_fold = disease.split

        Y = self.curr_model(X)
        scores = Y.cpu().detach().numpy().squeeze()
        return scores
2.125
2
scripts/telegram_bot.py
luigi311/ArBluna
18
11598
<filename>scripts/telegram_bot.py<gh_stars>10-100
import os
import distutils.util

from telegram import Update
from telegram.ext import Updater, CommandHandler, Filters, CallbackContext
from dotenv import load_dotenv

from scripts.get_info import get_ratio
from scripts.terra import get_balances, execute_swap

load_dotenv(override=True)

notify_telegram = bool(distutils.util.strtobool(os.getenv("NOTIFY_TELEGRAM")))
if notify_telegram:
    telegram_chat_id = int(os.getenv("TELEGRAM_CHAT_ID"))
    token = os.getenv("TELEGRAM_TOKEN")


def ping_command(update: Update, context: CallbackContext) -> None:
    """Send a message when the command /ping is issued."""
    update.message.reply_text("pong")


def help_command(update: Update, context: CallbackContext) -> None:
    """Send list of commands when /help is issued."""
    update.message.reply_text(
        "Commands:\n/ping check if the bot is online\n/luna get the bluna -> luna ratio\n/bluna get the luna -> bluna ratio\n/ust get the ust ratio\n/balance get the balances\n/swap_to_bluna_command to force a swap from luna to bluna\n/swap_to_luna_command to force a swap from bluna to luna"
    )


def bluna_command(update: Update, context: CallbackContext) -> None:
    """Send the current luna to bluna ratio."""
    luna_balance, bluna_balance, ust_balance = get_balances(notify_balance=False)
    bluna_price = get_ratio("bluna", luna_balance)
    update.message.reply_text(f"Luna -> bLuna ratio: {bluna_price}")


def luna_command(update: Update, context: CallbackContext) -> None:
    """Send the current bluna to luna ratio."""
    luna_balance, bluna_balance, ust_balance = get_balances(notify_balance=False)
    bluna_price = get_ratio("luna", bluna_balance)
    update.message.reply_text(f"bLuna -> Luna ratio: {bluna_price}")


def ust_command(update: Update, context: CallbackContext) -> None:
    """Send the current luna to UST price."""
    luna_balance, bluna_balance, ust_balance = get_balances(notify_balance=False)
    ust_price = get_ratio("ust", luna_balance)
    update.message.reply_text(f"Luna -> UST price: {ust_price}")


def balance_command(update: Update, context: CallbackContext) -> None:
    """Send the current balances of the account."""
    get_balances()


def swap_to_bluna_command(update: Update, context: CallbackContext) -> None:
    """Force swap to bluna."""
    luna_balance, bluna_balance, ust_balance = get_balances()
    price = get_ratio("bluna", luna_balance)

    if luna_balance > 0 and ust_balance > 0.15:
        execute_swap(luna_balance, "bluna", price)
    else:
        raise Exception(f"Not enough Luna {luna_balance} or UST {ust_balance}")


def swap_to_luna_command(update: Update, context: CallbackContext) -> None:
    """Force swap to luna."""
    luna_balance, bluna_balance, ust_balance = get_balances()
    price = get_ratio("luna", bluna_balance)

    if bluna_balance > 0 and ust_balance > 0.15:
        execute_swap(bluna_balance, "luna", price)
    else:
        raise Exception(f"Not enough bLuna {bluna_balance} or UST {ust_balance}")


def setup_bot() -> None:
    print("Starting up telegram bot")
    try:
        # Create the Updater and pass it your bot's token.
        updater = Updater(token, use_context=True)

        # Get the dispatcher to register handlers
        dispatcher = updater.dispatcher

        # on different commands - answer in Telegram
        dispatcher.add_handler(
            CommandHandler(
                "help", help_command, filters=Filters.chat(chat_id=telegram_chat_id)
            )
        )
        dispatcher.add_handler(
            CommandHandler(
                "ping", ping_command, filters=Filters.chat(chat_id=telegram_chat_id)
            )
        )
        dispatcher.add_handler(
            CommandHandler(
                "bluna", bluna_command, filters=Filters.chat(chat_id=telegram_chat_id)
            )
        )
        dispatcher.add_handler(
            CommandHandler(
                "luna", luna_command, filters=Filters.chat(chat_id=telegram_chat_id)
            )
        )
        dispatcher.add_handler(
            CommandHandler(
                "ust", ust_command, filters=Filters.chat(chat_id=telegram_chat_id)
            )
        )
        dispatcher.add_handler(
            CommandHandler(
                "balance",
                balance_command,
                filters=Filters.chat(chat_id=telegram_chat_id),
            )
        )
        dispatcher.add_handler(
            CommandHandler(
                "balances",
                balance_command,
                filters=Filters.chat(chat_id=telegram_chat_id),
            )
        )
        dispatcher.add_handler(
            CommandHandler(
                "swap_to_bluna",
                swap_to_bluna_command,
                filters=Filters.chat(chat_id=telegram_chat_id),
            )
        )
        dispatcher.add_handler(
            CommandHandler(
                "swap_to_luna",
                swap_to_luna_command,
                filters=Filters.chat(chat_id=telegram_chat_id),
            )
        )

        # Start the Bot
        updater.start_polling()
    except Exception as e:
        raise Exception(f"Telegram bot error: {e}")
2.46875
2
envs/mujoco/utils/download_meshes.py
hzm2016/assistive-gym-robosuite
1
11599
<reponame>hzm2016/assistive-gym-robosuite
import os
import zipfile

import requests


def check_and_download(name, google_id, files=None, force_download=False):
    """
    Checks if the meshes folder exists in the xml directory.
    If not, it will ask the user if they want to download them
    to be able to proceed.

    Parameters
    ----------
    name: string
        the file or directory to download
    google_id: string
        the google id that points to the location of the zip file.
        This should be stored in the xml or config file
    files: list of strings, Optional (Default: None)
        mesh file names expected inside `name`; used to detect missing files
    force_download: boolean, Optional (Default: False)
        True to skip checking if the file or folder exists
    """
    files_missing = False
    if force_download:
        files_missing = True
    else:
        # check if the provided name is a file or folder
        if not os.path.isfile(name) and not os.path.isdir(name):
            print("Checking for mesh files in : ", name)
            files_missing = True
        elif files is not None:
            mesh_files = [
                f for f in os.listdir(name) if os.path.isfile(os.path.join(name, f))
            ]
            # files_missing = all(elem in sorted(mesh_files) for elem in sorted(files))
            files_missing = set(files).difference(set(mesh_files))
            if files_missing:
                print("Checking for mesh files in : ", name)
                print("The following files are missing: ", files_missing)

    if files_missing:
        yes = ["y", "yes"]
        no = ["n", "no"]
        answered = False
        question = "Download mesh and texture files to run sim? (y/n): "
        while not answered:
            reply = str(input(question)).lower().strip()
            if reply[0] in yes:
                print("Downloading files...")
                name = name.split("/")
                name = "/".join(s for s in name[:-1])
                download_files(google_id, name + "/tmp")
                print("Sim files saved to %s" % name)
                answered = True
            elif reply[0] in no:
                raise Exception("Please download the required files to run the demo")
            else:
                question = "Please Enter (y/n) "


def download_files(google_id, destination):
    def _get_confirm_token(response):
        for key, value in response.cookies.items():
            if key.startswith("download_warning"):
                return value
        return None

    def _save_response_content(response, destination):
        CHUNK_SIZE = 32768
        with open(destination, "wb") as f:
            for chunk in response.iter_content(CHUNK_SIZE):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)

    def _extract_zip_files(zip_file):
        zip_file = "%s" % zip_file
        zipball = zipfile.ZipFile(zip_file)
        zipball.extractall(zip_file.split("tmp")[0])
        zipball.close()
        os.remove(zip_file)

    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={"id": google_id}, stream=True)
    print(response)
    token = _get_confirm_token(response)

    if token:
        params = {"id": google_id, "confirm": token}
        response = session.get(URL, params=params, stream=True)
    _save_response_content(response, destination)
    _extract_zip_files(destination)


if __name__ == "__main__":
    check_and_download('/home/zhimin/code/6_all_old_code/0_abr_control/abr_control/arms/',
                       '1doam-DgkW7OSPnwWZQM84edzX84ot-GK',
                       files=None,
                       force_download=True)
    # download_files('1SjWRUl-D1FZ5fB2cy4jF4X9wTsQ5LWzo', '/home/zhimin/code/6_all_old_code/0_abr_control/abr_control/arms')
3.234375
3