commit        stringlengths   40 .. 40
subject       stringlengths   4 .. 1.73k
repos         stringlengths   5 .. 127k
old_file      stringlengths   2 .. 751
new_file      stringlengths   2 .. 751
new_contents  stringlengths   1 .. 8.98k
old_contents  stringlengths   0 .. 6.59k
license       stringclasses   13 values
lang          stringclasses   23 values
bab1ad914ab9273aa8ab905edef2578b5c760f31
add django installed apps init opps core
jeanmask/opps,williamroot/opps,opps/opps,opps/opps,williamroot/opps,opps/opps,jeanmask/opps,williamroot/opps,jeanmask/opps,williamroot/opps,opps/opps,YACOWS/opps,YACOWS/opps,YACOWS/opps,YACOWS/opps,jeanmask/opps
opps/core/__init__.py
opps/core/__init__.py
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.conf import settings

trans_app_label = _('Opps')

settings.INSTALLED_APPS += (
    'opps.article',
    'opps.image',
    'opps.channel',
    'opps.source',
    'redactor',
    'tagging',
)

settings.REDACTOR_OPTIONS = {'lang': 'en'}
settings.REDACTOR_UPLOAD = 'uploads/'
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _

trans_app_label = _('Opps')
mit
Python
de29012d0bf48cf970ad37c62d7db960161f14c0
Remove unused stat import
kaiyou/freeposte.io,kaiyou/freeposte.io,kaiyou/freeposte.io,kaiyou/freeposte.io
core/dovecot/start.py
core/dovecot/start.py
#!/usr/bin/python3

import os
import glob
import multiprocessing
import logging as log
import sys

from podop import run_server
from socrate import system, conf

log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))


def start_podop():
    os.setuid(8)
    url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
    run_server(0, "dovecot", "/tmp/podop.socket", [
        ("quota", "url", url),
        ("auth", "url", url),
        ("sieve", "url", url),
    ])


# Actual startup script
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM", "antispam:11334")
if os.environ["WEBMAIL"] != "none":
    os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail")

for dovecot_file in glob.glob("/conf/*.conf"):
    conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))

os.makedirs("/conf/bin", exist_ok=True)
for script_file in glob.glob("/conf/*.script"):
    out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script', ''))
    conf.jinja(script_file, os.environ, out_file)
    os.chmod(out_file, 0o555)

# Run Podop, then postfix
multiprocessing.Process(target=start_podop).start()
os.system("chown mail:mail /mail")
os.system("chown -R mail:mail /var/lib/dovecot /conf")
os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
#!/usr/bin/python3

import os
import stat
import glob
import multiprocessing
import logging as log
import sys

from podop import run_server
from socrate import system, conf

log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))


def start_podop():
    os.setuid(8)
    url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
    run_server(0, "dovecot", "/tmp/podop.socket", [
        ("quota", "url", url),
        ("auth", "url", url),
        ("sieve", "url", url),
    ])


# Actual startup script
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM", "antispam:11334")
if os.environ["WEBMAIL"] != "none":
    os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail")

for dovecot_file in glob.glob("/conf/*.conf"):
    conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))

os.makedirs("/conf/bin", exist_ok=True)
for script_file in glob.glob("/conf/*.script"):
    out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script', ''))
    conf.jinja(script_file, os.environ, out_file)
    os.chmod(out_file, 0o555)

# Run Podop, then postfix
multiprocessing.Process(target=start_podop).start()
os.system("chown mail:mail /mail")
os.system("chown -R mail:mail /var/lib/dovecot /conf")
os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
mit
Python
6593fe983b40a5d4c467cb2ea0847e10e5faae1c
Update Beck_Pang_First_Python_practice_2.7.py
Beck-Sisyphus/Robocup,Beck-Sisyphus/Robocup,Beck-Sisyphus/Robocup,Beck-Sisyphus/Robocup,Beck-Sisyphus/Robocup
Coding_practice/Beck_Pang_First_Python_practice_2.7.py
Coding_practice/Beck_Pang_First_Python_practice_2.7.py
""" Beck Pang 25/07/2014 First practice project for our summer robotics team """ import random def name_to_number (name): # This helper function converts the string name into a number between 0 an 4 # pre: take a name in String as a parameter # post: return an represented number in integer if (name == "rock"): return 0 elif (name == "Spock"): return 1 elif (name == "paper"): return 2 elif (name == "lizard"): return 3 elif (name == "scissors"): return 4 else: print ("This name is not included in this game.\n") def number_to_name (number): # pre: take a number in integer as a parameter # post: return a name in String if (number == 0): return "rock" elif (number == 1): return "Spock" elif (number == 2): return "paper" elif (number == 3): return "lizard" elif (number == 4): return "scissors" else: return "no word found" def rpsls (player_choice): # This function operate the main functionality # pre: take a player's choice in String as a parameter # post: print the player and computer's choices in the console # and show the result print ("\n") player_number = name_to_number(player_choice) comp_number = random.randrange(5) comp_name = number_to_name(comp_number) print ("Player chooses " + player_choice + "\n") print ("Computer chooses " + comp_name + "\n") difference = (comp_number - player_number) % 5 if (difference == 0): print ("Player and computer tie!") elif (difference >= 2): print ("Player wins!") else: print ("Computer wins!") """ There is no main function in this game Please play this game in the console. """
""" Beck Pang 25/07/2014 First practice project for our summer robotics team """ import random def name_to_number (name): # This helper function converts the string name into a number between 0 an 4 # pre: take a name in String as a parameter # post: return an represented number in integer if (name == "rock"): return 0 elif (name == "Spock"): return 1 elif (name == "paper"): return 2 elif (name == "lizard"): return 3 elif (name ==hel "scissors"): return 4 else: print ("This name is not included in this game.\n") def number_to_name (number): # pre: take a number in integer as a parameter # post: return a name in String if (number == 0): return "rock" elif (number == 1): return "Spock" elif (number == 2): return "paper" elif (number == 3): return "lizard" elif (number == 4): return "scissors" else: return "no word found" def rpsls (player_choice): # This function operate the main functionality # pre: take a player's choice in String as a parameter # post: print the player and computer's choices in the console # and show the result print ("\n") player_number = name_to_number(player_choice) comp_number = random.randrange(5) comp_name = number_to_name(comp_number) print ("Player chooses " + player_choice + "\n") print ("Computer chooses " + comp_name + "\n") difference = (comp_number - player_number) % 5 if (difference == 0): print ("Player and computer tie!") elif (difference >= 2): print ("Player wins!") else: print ("Computer wins!") """ There is no main function in this game Please play this game in the console. """
mit
Python
8483de37aad2256266deb404ce4d9eaae31a8142
Remove backend
conversationai/conversationai-models,conversationai/conversationai-models
kaggle-classification/keras_trainer/rnn.py
kaggle-classification/keras_trainer/rnn.py
"""RNN""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras.layers import Input, GRU, Dense, Embedding, Dropout, Bidirectional, TimeDistributed, Multiply, Flatten, Reshape, Dot from keras.models import Model from keras_trainer import base_model from keras_trainer.custom_metrics import auc_roc class RNNModel(base_model.BaseModel): """ RNN hparams: embedding_dim vocab_size train_embedding """ def __init__(self, embeddings_matrix, hparams): self.embeddings_matrix = embeddings_matrix self.hparams = hparams def get_model(self): sequence_length = self.hparams.sequence_length I = Input(shape=(sequence_length,), dtype='float32') E = Embedding( self.hparams.vocab_size, self.hparams.embedding_dim, weights=[self.embeddings_matrix], input_length=sequence_length, trainable=self.hparams.train_embedding)( I) H = Bidirectional(GRU(128, return_sequences=True))(E) A = TimeDistributed(Dense(3), input_shape=(sequence_length, 256))(H) A = Flatten()(A) A = Dense(sequence_length, activation='softmax')(A) X = Dot((1, 1))([H, A]) X = Dense(128, activation='relu')(X) X = Dropout(self.hparams.dropout_rate)(X) Output = Dense(6, activation='sigmoid')(X) model = Model(inputs=I, outputs=Output) model.compile( optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy', auc_roc]) print(model.summary()) return model
"""RNN""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras.layers import Input, GRU, Dense, Embedding, Dropout, Bidirectional, TimeDistributed, Multiply, Flatten, Reshape, Dot from keras.models import Model from keras_trainer import base_model from keras_trainer.custom_metrics import auc_roc import keras.backend as K class RNNModel(base_model.BaseModel): """ RNN hparams: embedding_dim vocab_size train_embedding """ def __init__(self, embeddings_matrix, hparams): self.embeddings_matrix = embeddings_matrix self.hparams = hparams def get_model(self): sequence_length = self.hparams.sequence_length I = Input(shape=(sequence_length,), dtype='float32') E = Embedding( self.hparams.vocab_size, self.hparams.embedding_dim, weights=[self.embeddings_matrix], input_length=sequence_length, trainable=self.hparams.train_embedding)( I) H = Bidirectional(GRU(128, return_sequences=True))(E) A = TimeDistributed(Dense(3), input_shape=(sequence_length, 256))(H) A = Flatten()(A) A = Dense(sequence_length, activation='softmax')(A) X = Dot((1, 1))([H, A]) X = Dense(128, activation='relu')(X) X = Dropout(self.hparams.dropout_rate)(X) Output = Dense(6, activation='sigmoid')(X) model = Model(inputs=I, outputs=Output) model.compile( optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy', auc_roc]) print(model.summary()) return model
apache-2.0
Python
7fd0ed0897ffedf117698502cdefac0436ac4f2c
remove MotechTab import
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
corehq/tabs/config.py
corehq/tabs/config.py
from corehq.apps.styleguide.tabs import SGExampleTab, SimpleCrispyFormSGExample, \
    ControlsDemoSGExample
from corehq.tabs.tabclasses import DashboardTab, ProjectReportsTab, ProjectInfoTab, SetupTab, \
    ProjectDataTab, ApplicationsTab, CloudcareTab, MessagingTab, ProjectUsersTab, \
    AdminTab, IndicatorAdminTab, SMSAdminTab, AccountingTab, ProjectSettingsTab, \
    MySettingsTab

MENU_TABS = (
    DashboardTab,
    ProjectInfoTab,
    ProjectReportsTab,
    IndicatorAdminTab,
    ProjectDataTab,
    SetupTab,
    ProjectUsersTab,
    ApplicationsTab,
    CloudcareTab,
    MessagingTab,
    # invisible
    ProjectSettingsTab,
    MySettingsTab,
    # Admin
    AdminTab,
    SMSAdminTab,
    AccountingTab,
    # Styleguide
    SGExampleTab,
    SimpleCrispyFormSGExample,
    ControlsDemoSGExample,
)
from corehq.apps.styleguide.tabs import SGExampleTab, SimpleCrispyFormSGExample, \
    ControlsDemoSGExample
from corehq.tabs.tabclasses import DashboardTab, ProjectReportsTab, ProjectInfoTab, SetupTab, \
    ProjectDataTab, ApplicationsTab, CloudcareTab, MessagingTab, ProjectUsersTab, \
    AdminTab, IndicatorAdminTab, SMSAdminTab, AccountingTab, ProjectSettingsTab, \
    MySettingsTab, MotechTab

MENU_TABS = (
    DashboardTab,
    ProjectInfoTab,
    ProjectReportsTab,
    IndicatorAdminTab,
    ProjectDataTab,
    SetupTab,
    ProjectUsersTab,
    ApplicationsTab,
    CloudcareTab,
    MessagingTab,
    MotechTab,
    # invisible
    ProjectSettingsTab,
    MySettingsTab,
    # Admin
    AdminTab,
    SMSAdminTab,
    AccountingTab,
    # Styleguide
    SGExampleTab,
    SimpleCrispyFormSGExample,
    ControlsDemoSGExample,
)
bsd-3-clause
Python
0185a30a340fae956c0e5b9d9f354e56e2e2178a
update the wsgi file
dstufft/jutils
crate_project/wsgi.py
crate_project/wsgi.py
import os
import sys

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "apps")))

import newrelic.agent

newrelic.agent.initialize()

from django.core.wsgi import get_wsgi_application

application = get_wsgi_application()
import newrelic.agent

newrelic.agent.initialize()

import pinax.env

from django.core.wsgi import get_wsgi_application

# setup the environment for Django and Pinax
pinax.env.setup_environ(__file__)

# set application for WSGI processing
application = get_wsgi_application()
bsd-2-clause
Python
a0fab69d12d64d4e5371fcb26a4ec70365a76fa6
Move task results database to data dir
mchelem/cref2,mchelem/cref2,mchelem/cref2
cref/app/web/tasks.py
cref/app/web/tasks.py
from celery import Celery

from cref.app.terminal import run_cref

app = Celery(
    'tasks',
    backend='db+sqlite:///data/results.sqlite',
    broker='amqp://guest@localhost//'
)


@app.task
def predict_structure(sequence, params={}):
    return run_cref(sequence)
from celery import Celery

from cref.app.terminal import run_cref

app = Celery(
    'tasks',
    backend='db+sqlite:///results.sqlite',
    broker='amqp://guest@localhost//'
)


@app.task
def predict_structure(sequence, params={}):
    return run_cref(sequence)
mit
Python
6e287393ad87ad09f94f845d372b5835ad4ebaba
Increase plot range
Koheron/zynq-sdk,Koheron/zynq-sdk,Koheron/zynq-sdk,Koheron/zynq-sdk
examples/alpha250-4/adc-bram/test.py
examples/alpha250-4/adc-bram/test.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
import os
import time

from adc_bram import AdcBram
from koheron import connect

import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D

host = os.getenv('HOST', '192.168.1.50')
client = connect(host, 'adc-bram', restart=False)
driver = AdcBram(client)

print('ADC size = {}'.format(driver.adc_size))

driver.set_reference_clock(0)  # External
time.sleep(5)

clk_200MHz = {'idx': 0, 'fs': 200E6}
clk_250MHz = {'idx': 1, 'fs': 250E6}
clock = clk_250MHz
driver.set_sampling_frequency(clock['idx'])

# driver.phase_shift(0)

t = np.arange(driver.adc_size) / clock['fs']
t_us = 1e6 * t

# Dynamic plot
fig = plt.figure()
ax = fig.add_subplot(111)
y = np.zeros(driver.adc_size)
line0 = Line2D([], [], color='blue', label='IN0')
line1 = Line2D([], [], color='green', label='IN1')
line2 = Line2D([], [], color='red', label='IN2')
line3 = Line2D([], [], color='cyan', label='IN3')
ax.add_line(line0)
ax.add_line(line1)
ax.add_line(line2)
ax.add_line(line3)
ax.set_xlabel('Time (us)')
ax.set_ylabel('ADC Raw data')
ax.set_xlim((t_us[0], t_us[-1]))
# ax.set_ylim((-2**15, 2**15))
ax.set_ylim((-32768, 32768))
ax.legend(loc='upper right')
fig.canvas.draw()

while True:
    try:
        driver.trigger_acquisition()
        time.sleep(0.1)
        driver.get_adc(0)
        driver.get_adc(1)
        line0.set_data(t_us, driver.adc0[0,:])
        line1.set_data(t_us, driver.adc0[1,:])
        line2.set_data(t_us, driver.adc1[0,:])
        line3.set_data(t_us, driver.adc1[1,:])
        fig.canvas.draw()
        plt.pause(0.001)
        # plt.pause(3600)
    except KeyboardInterrupt:
        break
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
import os
import time

from adc_bram import AdcBram
from koheron import connect

import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D

host = os.getenv('HOST', '192.168.1.50')
client = connect(host, 'adc-bram', restart=False)
driver = AdcBram(client)

print('ADC size = {}'.format(driver.adc_size))

driver.set_reference_clock(0)  # External
time.sleep(5)

clk_200MHz = {'idx': 0, 'fs': 200E6}
clk_250MHz = {'idx': 1, 'fs': 250E6}
clock = clk_250MHz
driver.set_sampling_frequency(clock['idx'])

# driver.phase_shift(0)

t = np.arange(driver.adc_size) / clock['fs']
t_us = 1e6 * t

# Dynamic plot
fig = plt.figure()
ax = fig.add_subplot(111)
y = np.zeros(driver.adc_size)
line0 = Line2D([], [], color='blue', label='IN0')
line1 = Line2D([], [], color='green', label='IN1')
line2 = Line2D([], [], color='red', label='IN2')
line3 = Line2D([], [], color='cyan', label='IN3')
ax.add_line(line0)
ax.add_line(line1)
ax.add_line(line2)
ax.add_line(line3)
ax.set_xlabel('Time (us)')
ax.set_ylabel('ADC Raw data')
ax.set_xlim((t_us[0], t_us[-1]))
# ax.set_ylim((-2**15, 2**15))
ax.set_ylim((-300, 300))
ax.legend(loc='upper right')
fig.canvas.draw()

while True:
    try:
        driver.trigger_acquisition()
        time.sleep(0.1)
        driver.get_adc(0)
        driver.get_adc(1)
        line0.set_data(t_us, driver.adc0[0,:])
        line1.set_data(t_us, driver.adc0[1,:])
        line2.set_data(t_us, driver.adc1[0,:])
        line3.set_data(t_us, driver.adc1[1,:])
        fig.canvas.draw()
        plt.pause(0.001)
        # plt.pause(3600)
    except KeyboardInterrupt:
        break
mit
Python
63b828983b38eb00e68683c19c51f444102a030d
support p3k on python file
sherzberg/vim-bootstrap-updater
plugin/vim_bootstrap_updater.py
plugin/vim_bootstrap_updater.py
import os

try:
    import urllib2
    import urllib
except ImportError:
    import urllib.request as urllib2
    import urllib.parse as urllib


def vimrc_path(editor):
    return os.path.expanduser('~/.%src' % editor)


def _generate_vimrc(editor, langs):
    params = [('langs', l.strip()) for l in langs]
    params.append(('editor', editor))
    data = urllib.urlencode(params)
    resp = urllib2.urlopen("https://vim-bootstrap.appspot.com/generate.vim", data)
    return resp.read()


def get_available_langs():
    resp = urllib2.urlopen("https://vim-bootstrap.appspot.com/langs")
    return resp.read().decode('utf-8')


def update(vimrc, editor, langs):
    content = _generate_vimrc(editor, langs)
    vimrc = os.path.expanduser(vimrc)
    with open(vimrc, 'w') as fh:
        fh.write(str(content))
    return content
import os
import urllib
import urllib2


def vimrc_path(editor):
    return os.path.expanduser('~/.%src' % editor)


def _generate_vimrc(editor, langs):
    params = [('langs', l.strip()) for l in langs]
    params.append(('editor', editor))
    data = urllib.urlencode(params)
    resp = urllib2.urlopen("https://vim-bootstrap.appspot.com/generate.vim", data)
    return resp.read()


def get_available_langs():
    resp = urllib2.urlopen("https://vim-bootstrap.appspot.com/langs")
    return resp.read()


def update(vimrc, editor, langs):
    content = _generate_vimrc(editor, langs)
    vimrc = os.path.expanduser(vimrc)
    with open(vimrc, 'w') as fh:
        fh.write(str(content))
    return content
mit
Python
900d872d4d1f8a593f25ac982e48ac86660955fd
Store name unique
meghabhoj/NEWBAZAAR,meghabhoj/NEWBAZAAR,meghabhoj/NEWBAZAAR,evonove/django-bazaar,evonove/django-bazaar,evonove/django-bazaar
bazaar/listings/models.py
bazaar/listings/models.py
from __future__ import unicode_literals

from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible

from ..fields import MoneyField
from ..goods.models import Product


@python_2_unicode_compatible
class Listing(models.Model):
    title = models.CharField(max_length=100)
    description = models.TextField(max_length=500, blank=True)
    sales_units = models.IntegerField(default=1)

    # TODO: this should become a gallery
    image = models.ImageField(upload_to="listing_images")

    product = models.ManyToManyField(Product, related_name="listings")

    def __str__(self):
        return self.title


@python_2_unicode_compatible
class Store(models.Model):
    name = models.CharField(max_length=100, unique=True)
    url = models.URLField(blank=True)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Publishing(models.Model):
    external_id = models.CharField(max_length=128)
    price = MoneyField()
    available_units = models.IntegerField()
    published = models.BooleanField(default=False)
    last_update = models.DateTimeField(default=timezone.now)

    listing = models.ForeignKey(Listing, related_name="publishings")
    store = models.ForeignKey(Store, related_name="publishings")

    def __str__(self):
        return "Publishing %s on %s" % (self.external_id, self.store)
from __future__ import unicode_literals

from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible

from ..fields import MoneyField
from ..goods.models import Product


@python_2_unicode_compatible
class Listing(models.Model):
    title = models.CharField(max_length=100)
    description = models.TextField(max_length=500, blank=True)
    sales_units = models.IntegerField(default=1)

    # TODO: this should become a gallery
    image = models.ImageField(upload_to="listing_images")

    product = models.ManyToManyField(Product, related_name="listings")

    def __str__(self):
        return self.title


@python_2_unicode_compatible
class Store(models.Model):
    name = models.CharField(max_length=100)
    url = models.URLField(blank=True)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Publishing(models.Model):
    external_id = models.CharField(max_length=128)
    price = MoneyField()
    available_units = models.IntegerField()
    published = models.BooleanField(default=False)
    last_update = models.DateTimeField(default=timezone.now)

    listing = models.ForeignKey(Listing, related_name="publishings")
    store = models.ForeignKey(Store, related_name="publishings")

    def __str__(self):
        return "Publishing %s on %s" % (self.external_id, self.store)
bsd-2-clause
Python
19df232461679b3156f9d5889d59f095e0b97d60
Add CAN_DETECT
damngamerz/coala-bears,sounak98/coala-bears,Vamshi99/coala-bears,coala/coala-bears,damngamerz/coala-bears,LWJensen/coala-bears,Asnelchristian/coala-bears,Vamshi99/coala-bears,horczech/coala-bears,meetmangukiya/coala-bears,Shade5/coala-bears,damngamerz/coala-bears,Asnelchristian/coala-bears,Shade5/coala-bears,meetmangukiya/coala-bears,Vamshi99/coala-bears,madhukar01/coala-bears,LWJensen/coala-bears,gs0510/coala-bears,yash-nisar/coala-bears,refeed/coala-bears,coala/coala-bears,naveentata/coala-bears,srisankethu/coala-bears,aptrishu/coala-bears,mr-karan/coala-bears,horczech/coala-bears,seblat/coala-bears,Vamshi99/coala-bears,dosarudaniel/coala-bears,aptrishu/coala-bears,sounak98/coala-bears,vijeth-aradhya/coala-bears,aptrishu/coala-bears,ku3o/coala-bears,srisankethu/coala-bears,ankit01ojha/coala-bears,shreyans800755/coala-bears,Shade5/coala-bears,shreyans800755/coala-bears,aptrishu/coala-bears,kaustubhhiware/coala-bears,yash-nisar/coala-bears,gs0510/coala-bears,Vamshi99/coala-bears,arjunsinghy96/coala-bears,dosarudaniel/coala-bears,horczech/coala-bears,madhukar01/coala-bears,aptrishu/coala-bears,yash-nisar/coala-bears,shreyans800755/coala-bears,naveentata/coala-bears,LWJensen/coala-bears,LWJensen/coala-bears,sounak98/coala-bears,srisankethu/coala-bears,yashtrivedi96/coala-bears,dosarudaniel/coala-bears,horczech/coala-bears,meetmangukiya/coala-bears,chriscoyfish/coala-bears,arjunsinghy96/coala-bears,LWJensen/coala-bears,refeed/coala-bears,madhukar01/coala-bears,refeed/coala-bears,kaustubhhiware/coala-bears,vijeth-aradhya/coala-bears,kaustubhhiware/coala-bears,damngamerz/coala-bears,ku3o/coala-bears,coala/coala-bears,seblat/coala-bears,gs0510/coala-bears,gs0510/coala-bears,yash-nisar/coala-bears,naveentata/coala-bears,arjunsinghy96/coala-bears,Shade5/coala-bears,gs0510/coala-bears,ankit01ojha/coala-bears,yashtrivedi96/coala-bears,aptrishu/coala-bears,vijeth-aradhya/coala-bears,seblat/coala-bears,Shade5/coala-bears,kaustubhhiware/coala-bears,SanketDG/coala-bears,meetmangukiya/coala-bears,aptrishu/coala-bears,refeed/coala-bears,arjunsinghy96/coala-bears,coala/coala-bears,aptrishu/coala-bears,ku3o/coala-bears,Shade5/coala-bears,refeed/coala-bears,horczech/coala-bears,meetmangukiya/coala-bears,coala-analyzer/coala-bears,ku3o/coala-bears,yash-nisar/coala-bears,dosarudaniel/coala-bears,yashtrivedi96/coala-bears,meetmangukiya/coala-bears,chriscoyfish/coala-bears,aptrishu/coala-bears,ankit01ojha/coala-bears,srisankethu/coala-bears,SanketDG/coala-bears,madhukar01/coala-bears,yash-nisar/coala-bears,horczech/coala-bears,shreyans800755/coala-bears,shreyans800755/coala-bears,shreyans800755/coala-bears,incorrectusername/coala-bears,Asnelchristian/coala-bears,yash-nisar/coala-bears,shreyans800755/coala-bears,LWJensen/coala-bears,horczech/coala-bears,yash-nisar/coala-bears,ankit01ojha/coala-bears,ankit01ojha/coala-bears,Asnelchristian/coala-bears,SanketDG/coala-bears,naveentata/coala-bears,Vamshi99/coala-bears,horczech/coala-bears,kaustubhhiware/coala-bears,Vamshi99/coala-bears,Shade5/coala-bears,coala/coala-bears,Asnelchristian/coala-bears,incorrectusername/coala-bears,coala/coala-bears,incorrectusername/coala-bears,srisankethu/coala-bears,Vamshi99/coala-bears,yash-nisar/coala-bears,srisankethu/coala-bears,horczech/coala-bears,coala-analyzer/coala-bears,yashtrivedi96/coala-bears,refeed/coala-bears,sounak98/coala-bears,Asnelchristian/coala-bears,seblat/coala-bears,Vamshi99/coala-bears,naveentata/coala-bears,Asnelchristian/coala-bears,madhukar01/coala-bears,coala/coala-bears,yashtrivedi96/coala-bears,ku3o/coala-bears,gs0510/coala-bears,incorrectusername/coala-bears,chriscoyfish/coala-bears,seblat/coala-bears,incorrectusername/coala-bears,seblat/coala-bears,madhukar01/coala-bears,coala-analyzer/coala-bears,mr-karan/coala-bears,dosarudaniel/coala-bears,mr-karan/coala-bears,Shade5/coala-bears,vijeth-aradhya/coala-bears,dosarudaniel/coala-bears,SanketDG/coala-bears,kaustubhhiware/coala-bears,coala/coala-bears,damngamerz/coala-bears,ankit01ojha/coala-bears,ku3o/coala-bears,naveentata/coala-bears,chriscoyfish/coala-bears,shreyans800755/coala-bears,vijeth-aradhya/coala-bears,vijeth-aradhya/coala-bears,chriscoyfish/coala-bears,coala-analyzer/coala-bears,kaustubhhiware/coala-bears,coala/coala-bears,horczech/coala-bears,ku3o/coala-bears,gs0510/coala-bears,yash-nisar/coala-bears,chriscoyfish/coala-bears,sounak98/coala-bears,vijeth-aradhya/coala-bears,ankit01ojha/coala-bears,SanketDG/coala-bears,refeed/coala-bears,shreyans800755/coala-bears,ankit01ojha/coala-bears,seblat/coala-bears,coala-analyzer/coala-bears,incorrectusername/coala-bears,yashtrivedi96/coala-bears,srisankethu/coala-bears,refeed/coala-bears,arjunsinghy96/coala-bears,sounak98/coala-bears,sounak98/coala-bears,coala-analyzer/coala-bears,seblat/coala-bears,mr-karan/coala-bears,Asnelchristian/coala-bears,naveentata/coala-bears,ankit01ojha/coala-bears,ankit01ojha/coala-bears,mr-karan/coala-bears,coala/coala-bears,srisankethu/coala-bears,Shade5/coala-bears,LWJensen/coala-bears,coala/coala-bears,coala/coala-bears,dosarudaniel/coala-bears,aptrishu/coala-bears,SanketDG/coala-bears,yash-nisar/coala-bears,refeed/coala-bears,incorrectusername/coala-bears,damngamerz/coala-bears,arjunsinghy96/coala-bears,mr-karan/coala-bears,refeed/coala-bears,sounak98/coala-bears,gs0510/coala-bears,naveentata/coala-bears,incorrectusername/coala-bears,Vamshi99/coala-bears,horczech/coala-bears,damngamerz/coala-bears,meetmangukiya/coala-bears,arjunsinghy96/coala-bears,yashtrivedi96/coala-bears,shreyans800755/coala-bears,vijeth-aradhya/coala-bears,coala-analyzer/coala-bears,damngamerz/coala-bears,mr-karan/coala-bears,SanketDG/coala-bears,coala-analyzer/coala-bears,kaustubhhiware/coala-bears,SanketDG/coala-bears,coala-analyzer/coala-bears,Asnelchristian/coala-bears,arjunsinghy96/coala-bears,kaustubhhiware/coala-bears,srisankethu/coala-bears,yashtrivedi96/coala-bears,madhukar01/coala-bears,meetmangukiya/coala-bears,madhukar01/coala-bears,srisankethu/coala-bears,naveentata/coala-bears,dosarudaniel/coala-bears,mr-karan/coala-bears,chriscoyfish/coala-bears,shreyans800755/coala-bears,SanketDG/coala-bears,LWJensen/coala-bears
bears/yml/RAMLLintBear.py
bears/yml/RAMLLintBear.py
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.NpmRequirement import NpmRequirement


@linter(executable='ramllint',
        output_format='regex',
        output_regex=r'(?P<severity>error|warning|info).*\n (?P<message>.+) '
                     r'\[(?P<origin>.+)\]')
class RAMLLintBear:
    """
    RAML Linter is a static analysis, linter-like, utility that will
    enforce rules on a given RAML document, ensuring consistency and
    quality.

    Note: Files should not have leading empty lines, else the bear fails
    to identify the problems correctly.
    """

    LANGUAGES = {"RAML"}
    REQUIREMENTS = {NpmRequirement('ramllint', '1.2.2')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'[email protected]'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Syntax', 'Formatting'}

    @staticmethod
    def create_arguments(filename, file, config_file):
        return filename,
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.NpmRequirement import NpmRequirement


@linter(executable='ramllint',
        output_format='regex',
        output_regex=r'(?P<severity>error|warning|info).*\n (?P<message>.+) '
                     r'\[(?P<origin>.+)\]')
class RAMLLintBear:
    """
    RAML Linter is a static analysis, linter-like, utility that will
    enforce rules on a given RAML document, ensuring consistency and
    quality.

    Note: Files should not have leading empty lines, else the bear fails
    to identify the problems correctly.
    """

    LANGUAGES = {"RAML"}
    REQUIREMENTS = {NpmRequirement('ramllint', '1.2.2')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'[email protected]'}
    LICENSE = 'AGPL-3.0'

    @staticmethod
    def create_arguments(filename, file, config_file):
        return filename,
agpl-3.0
Python
824a2a547218febf61aed8d99eff5ddeeaf6f5ca
Remove unused imports
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
polyaxon/libs/models.py
polyaxon/libs/models.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

from django.db import models
from django.core.cache import cache


class DescribableModel(models.Model):
    description = models.TextField(blank=True, null=True)

    class Meta:
        abstract = True


class DiffModel(models.Model):
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True


class TypeModel(models.Model):
    name = models.CharField(max_length=128, unique=True)
    schema_definition = models.TextField()

    class Meta:
        abstract = True

    def __str__(self):
        return self.name


class Singleton(DiffModel):
    """A base model to represents a singleton."""

    class Meta:
        abstract = True

    def set_cache(self):
        cache.set(self.__class__.__name__, self)

    def save(self, *args, **kwargs):
        self.pk = 1
        super(Singleton, self).save(*args, **kwargs)
        self.set_cache()

    def delete(self, *args, **kwargs):
        pass

    @classmethod
    def may_be_update(cls, obj):
        raise NotImplementedError

    @classmethod
    def load(cls):
        raise NotImplementedError
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

from django.core.validators import validate_slug
from django.db import models
from django.core.cache import cache

from libs.blacklist import validate_blacklist_name


class DescribableModel(models.Model):
    description = models.TextField(blank=True, null=True)

    class Meta:
        abstract = True


class DiffModel(models.Model):
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True


class TypeModel(models.Model):
    name = models.CharField(max_length=128, unique=True)
    schema_definition = models.TextField()

    class Meta:
        abstract = True

    def __str__(self):
        return self.name


class Singleton(DiffModel):
    """A base model to represents a singleton."""

    class Meta:
        abstract = True

    def set_cache(self):
        cache.set(self.__class__.__name__, self)

    def save(self, *args, **kwargs):
        self.pk = 1
        super(Singleton, self).save(*args, **kwargs)
        self.set_cache()

    def delete(self, *args, **kwargs):
        pass

    @classmethod
    def may_be_update(cls, obj):
        raise NotImplementedError

    @classmethod
    def load(cls):
        raise NotImplementedError
apache-2.0
Python
814cc6cef757c3eef775240c749a098b1288eef3
Enable searching for an image in the admin
ken-muturi/pombola,geoffkilpin/pombola,patricmutwiri/pombola,geoffkilpin/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,mysociety/pombola,ken-muturi/pombola,geoffkilpin/pombola,geoffkilpin/pombola,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,hzj123/56th,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,patricmutwiri/pombola,ken-muturi/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola
pombola/images/admin.py
pombola/images/admin.py
from django.contrib import admin
from django.contrib.contenttypes.generic import GenericTabularInline

from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.admin import AdminImageMixin

from pombola.images import models


class ImageAdmin(AdminImageMixin, admin.ModelAdmin):
    list_display = [
        'thumbnail',
        'content_object',
        'is_primary',
        'source',
    ]
    search_fields = ['person__legal_name', 'id', 'source']

    def thumbnail(self, obj):
        if obj.image:
            im = get_thumbnail(obj.image, '100x100')
            return '<img src="%s" />' % (im.url)
        else:
            return "NO IMAGE FOUND"
    thumbnail.allow_tags = True


class ImageAdminInline(AdminImageMixin, GenericTabularInline):
    model = models.Image
    extra = 0
    can_delete = True


admin.site.register(models.Image, ImageAdmin)
from django.contrib import admin
from django.contrib.contenttypes.generic import GenericTabularInline

from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.admin import AdminImageMixin

from pombola.images import models


class ImageAdmin(AdminImageMixin, admin.ModelAdmin):
    list_display = [
        'thumbnail',
        'content_object',
        'is_primary',
        'source',
    ]

    def thumbnail(self, obj):
        if obj.image:
            im = get_thumbnail(obj.image, '100x100')
            return '<img src="%s" />' % (im.url)
        else:
            return "NO IMAGE FOUND"
    thumbnail.allow_tags = True


class ImageAdminInline(AdminImageMixin, GenericTabularInline):
    model = models.Image
    extra = 0
    can_delete = True


admin.site.register(models.Image, ImageAdmin)
agpl-3.0
Python
c99d5d30a698aafe3e554c48c9a47dd8be1a5575
Use imap instead of map
jogo/gerrit-fun
library.py
library.py
import json
import logging
import os
import subprocess
import urllib

import grequests
import numpy

logging.basicConfig()
logger = logging.getLogger("recheck")
logger.setLevel(logging.DEBUG)


def get_change_ids(repo_path, subtree=None, since="6.months"):
    """Return array of change-Ids of merged patches.

    returns list starting with most recent change

    repo_path: file path of repo
    since: how far back to look
    """
    change_ids = []
    cwd = os.getcwd()
    os.chdir(repo_path)
    command = "git log --no-merges --since=%s master" % since
    if subtree:
        command = command + " " + subtree
    log = subprocess.check_output(command.split(' '))
    os.chdir(cwd)
    lines = log.splitlines()
    for line in lines:
        if line.startswith(" Change-Id: "):
            change_id = line.split()[1]
            if len(change_id) != 41 or change_id[0] != "I":
                raise Exception("Invalid Change-Id: %s" % change_id)
            change_ids.append(change_id)
    return change_ids


def query_gerrit(template, change_ids, repo_name):
    """query gerrit."""
    queries = []
    template = "https://review.openstack.org" + template
    for change_id in change_ids:
        # ChangeIDs can be used in multiple branches/repos
        patch_id = urllib.quote_plus("%s~master~" % repo_name) + change_id
        queries.append(template % patch_id)

    unsent = (grequests.get(query) for query in queries)
    for r in grequests.imap(unsent, size=10):
        try:
            yield json.loads(r.text[4:])
        except AttributeError:
            # request must have failed, ignore it and move on
            logger.debug("failed to parse gerrit response")
            pass


def get_change_details(change_ids, repo_name):
    """get gerrit change details for a list of change_id.

    Returns a generator
    """
    return query_gerrit("/changes/%s/detail", change_ids, repo_name)


def get_latest_revision(change_ids, repo_name):
    """get latest revisions for a list of change_ids.

    Returns a generator
    """
    return query_gerrit("/changes/%s/revisions/current/review", change_ids, repo_name)


def stats(values):
    print "Average: %s" % numpy.mean(values)
    print "median: %s" % numpy.median(values)
    print "variance: %s" % numpy.var(values)
import json
import logging
import os
import subprocess
import urllib

import grequests
import numpy

logging.basicConfig()
logger = logging.getLogger("recheck")
logger.setLevel(logging.DEBUG)


def get_change_ids(repo_path, subtree=None, since="6.months"):
    """Return array of change-Ids of merged patches.

    returns list starting with most recent change

    repo_path: file path of repo
    since: how far back to look
    """
    change_ids = []
    cwd = os.getcwd()
    os.chdir(repo_path)
    command = "git log --no-merges --since=%s master" % since
    if subtree:
        command = command + " " + subtree
    log = subprocess.check_output(command.split(' '))
    os.chdir(cwd)
    lines = log.splitlines()
    for line in lines:
        if line.startswith(" Change-Id: "):
            change_id = line.split()[1]
            if len(change_id) != 41 or change_id[0] != "I":
                raise Exception("Invalid Change-Id: %s" % change_id)
            change_ids.append(change_id)
    return change_ids


def query_gerrit(template, change_ids, repo_name):
    """query gerrit."""
    queries = []
    template = "https://review.openstack.org" + template
    for change_id in change_ids:
        # ChangeIDs can be used in multiple branches/repos
        patch_id = urllib.quote_plus("%s~master~" % repo_name) + change_id
        queries.append(template % patch_id)

    unsent = (grequests.get(query) for query in queries)
    for r in grequests.map(unsent, size=10):
        try:
            yield json.loads(r.text[4:])
        except AttributeError:
            # request must have failed, ignore it and move on
            logger.debug("failed to parse gerrit response")
            pass


def get_change_details(change_ids, repo_name):
    """get gerrit change details for a list of change_id.

    Returns a generator
    """
    return query_gerrit("/changes/%s/detail", change_ids, repo_name)


def get_latest_revision(change_ids, repo_name):
    """get latest revisions for a list of change_ids.

    Returns a generator
    """
    return query_gerrit("/changes/%s/revisions/current/review", change_ids, repo_name)


def stats(values):
    print "Average: %s" % numpy.mean(values)
    print "median: %s" % numpy.median(values)
    print "variance: %s" % numpy.var(values)
apache-2.0
Python
b0f5913d5f775062b8d5e253e1403b995b67c81a
Bump to version 3.2.0
jrief/django-post_office,ui/django-post_office,ui/django-post_office
post_office/__init__.py
post_office/__init__.py
VERSION = (3, 2, 0)

from .backends import EmailBackend

default_app_config = 'post_office.apps.PostOfficeConfig'
VERSION = (3, 2, 0, 'dev')

from .backends import EmailBackend

default_app_config = 'post_office.apps.PostOfficeConfig'
mit
Python
f28732596487a2a0fc664c5444e618ce5c23eccd
fix usage
teharrison/pipeline,wgerlach/pipeline,MG-RAST/pipeline,MG-RAST/pipeline,teharrison/pipeline,MG-RAST/pipeline,wgerlach/pipeline,wgerlach/pipeline,teharrison/pipeline
bin/extract_darkmatter.py
bin/extract_darkmatter.py
#!/usr/bin/env python

import argparse
import leveldb
import os
import shutil
import sys
from Bio import SeqIO


def main(args):
    parser = argparse.ArgumentParser(description="Script to extract darkmatter - predicted proteins with no similarities")
    parser.add_argument("-i", "--input", dest="input", help="Name of input genecall fasta file.")
    parser.add_argument("-o", "--output", dest="output", help="Name of output darkmatter fasta file.")
    parser.add_argument("-s", "--sims", dest="sims", help="Name of similarity file")
    parser.add_argument("-d", "--db", dest="db", default=".", help="Directory to store LevelDB, default CWD")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages")
    args = parser.parse_args()

    if ('sims' not in args) or (os.stat(args.sims).st_size == 0):
        print "Similarity file was omitted or is empty, copying %s to %s ... " % (args.input, args.output)
        shutil.copyfile(args.input, args.output)
        return 0

    db = leveldb.LevelDB(args.db)
    shdl = open(args.sims, 'rU')
    if args.verbose:
        print "Reading file %s ... " % args.sims
    for line in shdl:
        parts = line.strip().split('\t')
        db.Put(parts[0], 1)
    shdl.close()
    if args.verbose:
        print "Done"
        print "Reading file %s ... " % args.input

    ihdl = open(args.input, 'rU')
    ohdl = open(args.output, 'w')
    g_num = 0
    d_num = 0
    for rec in SeqIO.parse(ihdl, 'fasta'):
        g_num += 1
        try:
            val = db.Get(rec.id)
        except KeyError:
            d_num += 1
            ohdl.write("%s\n%s\n"%(rec.id, str(rec.seq).upper()))
    ihdl.close()
    ohdl.close()
    if args.verbose:
        print "Done: %d darkmatter genes found out of %d total" %(d_num, g_num)
    return 0


if __name__ == "__main__":
    sys.exit( main(sys.argv) )
#!/usr/bin/env python

import argparse
import leveldb
import os
import shutil
import sys
from Bio import SeqIO


def main(args):
    parser = argparse.ArgumentParser(description="Script to extract darkmatter - predicted proteins with no similarities")
    parser.add_argument("-i", "--input", help="Name of input genecall fasta file.")
    parser.add_argument("-o", "--output", help="Name of output darkmatter fasta file.")
    parser.add_argument("-s", "--sims", dest="cfile", help="Name of similarity file")
    parser.add_argument("-d", "--db", dest="db", default=".", help="Directory to store LevelDB, default CWD")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages")
    args = parser.parse_args()

    if ('sims' not in args) or (os.stat(args.sims).st_size == 0):
        print "Similarity file was omitted or is empty, copying %s to %s ... " % (args.input, args.output)
        shutil.copyfile(args.input, args.output)
        return 0

    db = leveldb.LevelDB(args.db)
    shdl = open(args.sims, 'rU')
    if args.verbose:
        print "Reading file %s ... " % args.sims
    for line in shdl:
        parts = line.strip().split('\t')
        db.Put(parts[0], 1)
    shdl.close()
    if args.verbose:
        print "Done"
        print "Reading file %s ... " % args.input

    ihdl = open(args.input, 'rU')
    ohdl = open(args.output, 'w')
    g_num = 0
    d_num = 0
    for rec in SeqIO.parse(ihdl, 'fasta'):
        g_num += 1
        try:
            val = db.Get(rec.id)
        except KeyError:
            d_num += 1
            ohdl.write("%s\n%s\n"%(rec.id, str(rec.seq).upper()))
    ihdl.close()
    ohdl.close()
    if args.verbose:
        print "Done: %d darkmatter genes found out of %d total" %(d_num, g_num)
    return 0


if __name__ == "__main__":
    sys.exit( main(sys.argv) )
bsd-2-clause
Python
a5857bc5b019dda8baca03bd68f08b4a26a85911
add import module in init file.
biokit/biokit,biokit/biokit
biokit/rtools/__init__.py
biokit/rtools/__init__.py
from .rtools import *
bsd-2-clause
Python
8599480ed93a0117f326689280c7a896d6bf697a
add version 3.1-4 to r-bayesm (#20807)
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
var/spack/repos/builtin/packages/r-bayesm/package.py
var/spack/repos/builtin/packages/r-bayesm/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class RBayesm(RPackage):
    """Bayesian Inference for Marketing/Micro-Econometrics

    Covers many important models used in marketing and micro-econometrics
    applications. The package includes: Bayes Regression (univariate or
    multivariate dep var), Bayes Seemingly Unrelated Regression (SUR),
    Binary and Ordinal Probit, Multinomial Logit (MNL) and Multinomial
    Probit (MNP), Multivariate Probit, Negative Binomial (Poisson)
    Regression, Multivariate Mixtures of Normals (including clustering),
    Dirichlet Process Prior Density Estimation with normal base,
    Hierarchical Linear Models with normal prior and covariates,
    Hierarchical Linear Models with a mixture of normals prior and
    covariates, Hierarchical Multinomial Logits with a mixture of normals
    prior and covariates, Hierarchical Multinomial Logits with a Dirichlet
    Process prior and covariates, Hierarchical Negative Binomial Regression
    Models, Bayesian analysis of choice-based conjoint data, Bayesian
    treatment of linear instrumental variables models, Analysis of
    Multivariate Ordinal survey data with scale usage heterogeneity (as in
    Rossi et al, JASA (01)), Bayesian Analysis of Aggregate Random
    Coefficient Logit Models as in BLP (see Jiang, Manchanda, Rossi 2009)
    For further reference, consult our book, Bayesian Statistics and
    Marketing by Rossi, Allenby and McCulloch (Wiley 2005) and Bayesian
    Non- and Semi-Parametric Methods and Applications (Princeton U Press
    2014)."""

    homepage = "https://cloud.r-project.org/package=bayesm"
    url      = "https://cloud.r-project.org/src/contrib/bayesm_3.1-0.1.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/bayesm"

    version('3.1-4', sha256='061b216c62bc72eab8d646ad4075f2f78823f9913344a781fa53ea7cf4a48f94')
    version('3.1-3', sha256='51e4827eca8cd4cf3626f3c2282543df7c392b3ffb843f4bfb386fe104642a10')
    version('3.1-2', sha256='a332f16e998ab10b17a2b1b9838d61660c36e914fe4d2e388a59f031d52ad736')
    version('3.1-1', sha256='4854517dec30ab7c994de862aae1998c2d0c5e71265fd9eb7ed36891d4676078')
    version('3.1-0.1', sha256='5879823b7fb6e6df0c0fe98faabc1044a4149bb65989062df4ade64e19d26411')

    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('r-rcpparmadillo', type=('build', 'run'))
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class RBayesm(RPackage):
    """Bayesian Inference for Marketing/Micro-Econometrics"""

    homepage = "https://cloud.r-project.org/package=bayesm"
    url      = "https://cloud.r-project.org/src/contrib/bayesm_3.1-0.1.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/bayesm"

    version('3.1-3', sha256='51e4827eca8cd4cf3626f3c2282543df7c392b3ffb843f4bfb386fe104642a10')
    version('3.1-2', sha256='a332f16e998ab10b17a2b1b9838d61660c36e914fe4d2e388a59f031d52ad736')
    version('3.1-1', sha256='4854517dec30ab7c994de862aae1998c2d0c5e71265fd9eb7ed36891d4676078')
    version('3.1-0.1', sha256='5879823b7fb6e6df0c0fe98faabc1044a4149bb65989062df4ade64e19d26411')

    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('r-rcpparmadillo', type=('build', 'run'))
lgpl-2.1
Python
52e0f47a3ff67bd0c8a31c6755b384dedd70ee02
update scalasca to latest version, simplify recipe (#11999)
iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack
var/spack/repos/builtin/packages/scalasca/package.py
var/spack/repos/builtin/packages/scalasca/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Scalasca(AutotoolsPackage):
    """Scalasca is a software tool that supports the performance optimization
    of parallel programs by measuring and analyzing their runtime behavior.
    The analysis identifies potential performance bottlenecks - in
    particular those concerning communication and synchronization - and
    offers guidance in exploring their causes.
    """

    homepage = "http://www.scalasca.org"
    url = "http://apps.fz-juelich.de/scalasca/releases/scalasca/2.1/dist/scalasca-2.1.tar.gz"
    list_url = "https://scalasca.org/scalasca/front_content.php?idart=1072"

    version('2.5', sha256='7dfa01e383bfb8a4fd3771c9ea98ff43772e415009d9f3c5f63b9e05f2dde0f6')
    version('2.4', '4a895868258030f700a635eac93d36764f60c8c63673c7db419ea4bcc6b0b760')
    version('2.3.1', 'a83ced912b9d2330004cb6b9cefa7585')
    version('2.2.2', '2bafce988b0522d18072f7771e491ab9')
    version('2.1', 'bab9c2b021e51e2ba187feec442b96e6')

    depends_on("mpi")

    # version 2.4+
    depends_on('[email protected]:', when='@2.4:')

    # version 2.3+
    depends_on('otf2@2:', when='@2.3:')

    # version 2.3
    depends_on('[email protected]', when='@2.3:2.3.99')

    # version 2.1 - 2.2
    depends_on('[email protected]', when='@2.1:2.2.999')
    depends_on('[email protected]', when='@2.1:2.2.999')

    def url_for_version(self, version):
        return 'http://apps.fz-juelich.de/scalasca/releases/scalasca/{0}/dist/scalasca-{1}.tar.gz'.format(version.up_to(2), version)

    def configure_args(self):
        spec = self.spec

        config_args = ["--enable-shared"]

        if spec.satisfies('@2.4:'):
            config_args.append("--with-cube=%s" % spec['cubew'].prefix.bin)
        else:
            config_args.append("--with-cube=%s" % spec['cube'].prefix.bin)

        config_args.append("--with-otf2=%s" % spec['otf2'].prefix.bin)

        if self.spec['mpi'].name == 'openmpi':
            config_args.append("--with-mpi=openmpi")
        elif self.spec.satisfies('^mpich@3:'):
            config_args.append("--with-mpi=mpich3")

        return config_args
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Scalasca(AutotoolsPackage):
    """Scalasca is a software tool that supports the performance optimization
    of parallel programs by measuring and analyzing their runtime behavior.
    The analysis identifies potential performance bottlenecks - in
    particular those concerning communication and synchronization - and
    offers guidance in exploring their causes.
    """

    homepage = "http://www.scalasca.org"
    url = "http://apps.fz-juelich.de/scalasca/releases/scalasca/2.1/dist/scalasca-2.1.tar.gz"

    version('2.4', '4a895868258030f700a635eac93d36764f60c8c63673c7db419ea4bcc6b0b760')
    version('2.3.1', 'a83ced912b9d2330004cb6b9cefa7585')
    version('2.2.2', '2bafce988b0522d18072f7771e491ab9')
    version('2.1', 'bab9c2b021e51e2ba187feec442b96e6')

    depends_on("mpi")

    # version 2.4
    depends_on('[email protected]:', when='@2.4:')

    # version 2.3
    depends_on('[email protected]', when='@2.3:2.3.99')
    depends_on('otf2@2:', when='@2.3:')

    # version 2.1+
    depends_on('[email protected]', when='@2.1:2.2.999')
    depends_on('[email protected]', when='@2.1:2.2.999')

    def url_for_version(self, version):
        return 'http://apps.fz-juelich.de/scalasca/releases/scalasca/{0}/dist/scalasca-{1}.tar.gz'.format(version.up_to(2), version)

    def configure_args(self):
        spec = self.spec

        config_args = ["--enable-shared"]

        if spec.satisfies('@2.4:'):
            config_args.append("--with-cube=%s" % spec['cubew'].prefix.bin)
        else:
            config_args.append("--with-cube=%s" % spec['cube'].prefix.bin)

        config_args.append("--with-otf2=%s" % spec['otf2'].prefix.bin)

        if self.spec['mpi'].name == 'openmpi':
            config_args.append("--with-mpi=openmpi")
        elif self.spec.satisfies('^mpich@3:'):
            config_args.append("--with-mpi=mpich3")

        return config_args
lgpl-2.1
Python
8ffe530025e38d06ffc567fb69e9b96874db3faa
Increase version
crateio/carrier
conveyor/__init__.py
conveyor/__init__.py
__version__ = "0.1.dev3"
__version__ = "0.1.dev2"
bsd-2-clause
Python
20d5f5d5e10dcf118639b4ca538ef7537863145a
add cache 2 hours
IQSS/miniverse,IQSS/miniverse,IQSS/miniverse
dv_apps/dvobject_api/views_dataverses.py
dv_apps/dvobject_api/views_dataverses.py
import json
from collections import OrderedDict

from django.shortcuts import render
from django.http import Http404
from django.conf import settings
from django.http import JsonResponse, HttpResponse
from django.template.loader import render_to_string
from django.forms.models import model_to_dict
from django.views.decorators.cache import cache_page
from django.core import serializers

from dv_apps.dataverses.models import Dataverse
from dv_apps.dataverses.util import DataverseUtil


def get_pretty_val(request):
    """Quick check of url param to pretty print JSON"""
    if request.GET.get('pretty', None) is not None:
        return True
    return False


@cache_page(60 * 60 * 2)
def view_single_dataverse_by_alias(request, alias):
    try:
        dv = Dataverse.objects.select_related('dvobject').get(alias=alias)
    except Dataverse.DoesNotExist:
        raise Http404

    return view_single_dataverse(request, dv)


@cache_page(60 * 60 * 2)
def view_single_dataverse_by_id(request, dataverse_id):
    try:
        dv = Dataverse.objects.select_related('dvobject').get(dvobject__id=dataverse_id)
    except Dataverse.DoesNotExist:
        raise Http404

    return view_single_dataverse(request, dv)


@cache_page(60 * 15)
def view_single_dataverse(request, dv):
    """
    Show JSON for a single Dataverse
    """
    if dv is None:
        raise Http404

    assert isinstance(dv, Dataverse), "dv must be a Dataverse object or None"

    is_pretty = request.GET.get('pretty', None)
    if is_pretty is not None:
        is_pretty = True

    resp_dict = OrderedDict()
    resp_dict['status'] = "OK"
    resp_dict['data'] = DataverseUtil(dv).as_json()  #model_to_dict(dv)

    if is_pretty:
        s = '<pre>%s</pre>' % json.dumps(resp_dict, indent=4)
        return HttpResponse(s)
    else:
        return JsonResponse(resp_dict)#, content_type='application/json')
import json
from collections import OrderedDict

from django.shortcuts import render
from django.http import Http404
from django.conf import settings
from django.http import JsonResponse, HttpResponse
from django.template.loader import render_to_string
from django.forms.models import model_to_dict
from django.views.decorators.cache import cache_page
from django.core import serializers

from dv_apps.dataverses.models import Dataverse
from dv_apps.dataverses.util import DataverseUtil


def get_pretty_val(request):
    """Quick check of url param to pretty print JSON"""
    if request.GET.get('pretty', None) is not None:
        return True
    return False


def view_single_dataverse_by_alias(request, alias):
    try:
        dv = Dataverse.objects.select_related('dvobject').get(alias=alias)
    except Dataverse.DoesNotExist:
        raise Http404

    return view_single_dataverse(request, dv)


def view_single_dataverse_by_id(request, dataverse_id):
    try:
        dv = Dataverse.objects.select_related('dvobject').get(dvobject__id=dataverse_id)
    except Dataverse.DoesNotExist:
        raise Http404

    return view_single_dataverse(request, dv)


@cache_page(60 * 15)
def view_single_dataverse(request, dv):
    """
    Show JSON for a single Dataverse
    """
    if dv is None:
        raise Http404

    assert isinstance(dv, Dataverse), "dv must be a Dataverse object or None"

    is_pretty = request.GET.get('pretty', None)
    if is_pretty is not None:
        is_pretty = True

    resp_dict = OrderedDict()
    resp_dict['status'] = "OK"
    resp_dict['data'] = DataverseUtil(dv).as_json()  #model_to_dict(dv)

    if is_pretty:
        s = '<pre>%s</pre>' % json.dumps(resp_dict, indent=4)
        return HttpResponse(s)
    else:
        return JsonResponse(resp_dict)#, content_type='application/json')
mit
Python
4e515f070f844569b84eeb77f7e7eda883bc861e
fix class name
houssine78/vertical-cooperative,houssine78/vertical-cooperative,houssine78/vertical-cooperative
easy_my_coop/wizard/update_share_line.py
easy_my_coop/wizard/update_share_line.py
# -*- coding: utf-8 -*-

from openerp import api, fields, models, _
from openerp.exceptions import UserError


class ShareLineUpdateInfo(models.TransientModel):
    _name = "share.line.update.info"

    @api.model
    def _get_share_line(self):
        active_id = self.env.context.get('active_id')
        return self.env['share.line'].browse(active_id)

    @api.model
    def _get_effective_date(self):
        share_line = self._get_share_line()
        return share_line.effective_date

    effective_date = fields.Date(string="effective date",
                                 required=True,
                                 default=_get_effective_date)
    cooperator = fields.Many2one(related='share_line.partner_id',
                                 string="Cooperator")
    share_line = fields.Many2one('share.line',
                                 string="Share line",
                                 default=_get_share_line)

    @api.multi
    def update(self):
        line = self.share_line
        cooperator = line.partner_id

        sub_reg = self.env['subscription.register'].search(
            [('partner_id', '=', cooperator.id),
             ('share_product_id', '=', line.share_product_id.id),
             ('quantity', '=', line.share_number),
             ('date', '=', line.effective_date)])

        if sub_reg:
            if len(sub_reg) > 1:
                raise UserError(_("Error the update return more than one"
                                  " subscription register lines."))
            else:
                line.effective_date = self.effective_date
                sub_reg.date = self.effective_date

        return True
# -*- coding: utf-8 -*-

from openerp import api, fields, models, _
from openerp.exceptions import UserError


class PartnerUpdateInfo(models.TransientModel):
    _name = "share.line.update.info"

    @api.model
    def _get_share_line(self):
        active_id = self.env.context.get('active_id')
        return self.env['share.line'].browse(active_id)

    @api.model
    def _get_effective_date(self):
        share_line = self._get_share_line()
        return share_line.effective_date

    effective_date = fields.Date(string="effective date",
                                 required=True,
                                 default=_get_effective_date)
    cooperator = fields.Many2one(related='share_line.partner_id',
                                 string="Cooperator")
    share_line = fields.Many2one('share.line',
                                 string="Share line",
                                 default=_get_share_line)

    @api.multi
    def update(self):
        line = self.share_line
        cooperator = line.partner_id

        sub_reg = self.env['subscription.register'].search(
            [('partner_id', '=', cooperator.id),
             ('share_product_id', '=', line.share_product_id.id),
             ('quantity', '=', line.share_number),
             ('date', '=', line.effective_date)])

        if sub_reg:
            if len(sub_reg) > 1:
                raise UserError(_("Error the update return more than one"
                                  " subscription register lines."))
            else:
                line.effective_date = self.effective_date
                sub_reg.date = self.effective_date

        return True
agpl-3.0
Python
38db4b0a23e2c2aaf858d0b2bd9d5ae4df819e66
Move imports in mythicbeastsdns component (#28033)
tchellomello/home-assistant,sander76/home-assistant,rohitranjan1991/home-assistant,pschmitt/home-assistant,home-assistant/home-assistant,nkgilley/home-assistant,joopert/home-assistant,Danielhiversen/home-assistant,leppa/home-assistant,kennedyshead/home-assistant,mezz64/home-assistant,sander76/home-assistant,pschmitt/home-assistant,postlund/home-assistant,mezz64/home-assistant,tboyce1/home-assistant,balloob/home-assistant,titilambert/home-assistant,rohitranjan1991/home-assistant,lukas-hetzenecker/home-assistant,Danielhiversen/home-assistant,mKeRix/home-assistant,soldag/home-assistant,home-assistant/home-assistant,Teagan42/home-assistant,GenericStudent/home-assistant,robbiet480/home-assistant,toddeye/home-assistant,Teagan42/home-assistant,partofthething/home-assistant,tboyce021/home-assistant,aronsky/home-assistant,kennedyshead/home-assistant,lukas-hetzenecker/home-assistant,jawilson/home-assistant,rohitranjan1991/home-assistant,turbokongen/home-assistant,titilambert/home-assistant,adrienbrault/home-assistant,mKeRix/home-assistant,tboyce1/home-assistant,w1ll1am23/home-assistant,soldag/home-assistant,mKeRix/home-assistant,FreekingDean/home-assistant,qedi-r/home-assistant,mKeRix/home-assistant,nkgilley/home-assistant,toddeye/home-assistant,tboyce1/home-assistant,tchellomello/home-assistant,joopert/home-assistant,tboyce021/home-assistant,adrienbrault/home-assistant,leppa/home-assistant,GenericStudent/home-assistant,jawilson/home-assistant,qedi-r/home-assistant,sdague/home-assistant,FreekingDean/home-assistant,postlund/home-assistant,robbiet480/home-assistant,w1ll1am23/home-assistant,partofthething/home-assistant,balloob/home-assistant,sdague/home-assistant,aronsky/home-assistant,tboyce1/home-assistant,turbokongen/home-assistant,balloob/home-assistant
homeassistant/components/mythicbeastsdns/__init__.py
homeassistant/components/mythicbeastsdns/__init__.py
"""Support for Mythic Beasts Dynamic DNS service.""" from datetime import timedelta import logging import mbddns import voluptuous as vol from homeassistant.const import ( CONF_DOMAIN, CONF_HOST, CONF_PASSWORD, CONF_SCAN_INTERVAL, ) from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.event import async_track_time_interval _LOGGER = logging.getLogger(__name__) DOMAIN = "mythicbeastsdns" DEFAULT_INTERVAL = timedelta(minutes=10) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_DOMAIN): cv.string, vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All( cv.time_period, cv.positive_timedelta ), } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Initialize the Mythic Beasts component.""" domain = config[DOMAIN][CONF_DOMAIN] password = config[DOMAIN][CONF_PASSWORD] host = config[DOMAIN][CONF_HOST] update_interval = config[DOMAIN][CONF_SCAN_INTERVAL] session = async_get_clientsession(hass) result = await mbddns.update(domain, password, host, session=session) if not result: return False async def update_domain_interval(now): """Update the DNS entry.""" await mbddns.update(domain, password, host, session=session) async_track_time_interval(hass, update_domain_interval, update_interval) return True
"""Support for Mythic Beasts Dynamic DNS service.""" import logging from datetime import timedelta import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.const import ( CONF_DOMAIN, CONF_HOST, CONF_PASSWORD, CONF_SCAN_INTERVAL, ) from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.event import async_track_time_interval _LOGGER = logging.getLogger(__name__) DOMAIN = "mythicbeastsdns" DEFAULT_INTERVAL = timedelta(minutes=10) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_DOMAIN): cv.string, vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All( cv.time_period, cv.positive_timedelta ), } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Initialize the Mythic Beasts component.""" import mbddns domain = config[DOMAIN][CONF_DOMAIN] password = config[DOMAIN][CONF_PASSWORD] host = config[DOMAIN][CONF_HOST] update_interval = config[DOMAIN][CONF_SCAN_INTERVAL] session = async_get_clientsession(hass) result = await mbddns.update(domain, password, host, session=session) if not result: return False async def update_domain_interval(now): """Update the DNS entry.""" await mbddns.update(domain, password, host, session=session) async_track_time_interval(hass, update_domain_interval, update_interval) return True
apache-2.0
Python
5616573372638f2b195714cf02db8a7a02a4678f
Correct column name
RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline
luigi/tasks/rfam/pgload_go_term_mapping.py
luigi/tasks/rfam/pgload_go_term_mapping.py
# -*- coding: utf-8 -*-

"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from tasks.utils.pgloader import PGLoader
from tasks.go_terms.pgload_go_terms import PGLoadGoTerms

from .go_term_mapping_csv import RfamGoTermsCSV
from .pgload_families import RfamPGLoadFamilies

CONTROL_FILE = """LOAD CSV
FROM '{filename}'
WITH ENCODING ISO-8859-14
HAVING FIELDS (
    go_term_id,
    rfam_model_id
)
INTO {db_url}
TARGET COLUMNS (
    go_term_id,
    rfam_model_id
)

SET
    search_path = '{search_path}'

WITH
    skip header = 1,
    fields escaped by double-quote,
    fields terminated by ','

BEFORE LOAD DO
$$
create table if not exists load_rfam_go_terms (
    go_term_id character varying(10) COLLATE pg_catalog."default" NOT NULL,
    rfam_model_id character varying(20) COLLATE pg_catalog."default" NOT NULL
);
$$,
$$
truncate table load_rfam_go_terms;
$$

AFTER LOAD DO
$$
insert into rfam_go_terms (
    go_term_id,
    rfam_model_id
) (
select
    go_term_id,
    rfam_model_id
from load_rfam_go_terms
)
ON CONFLICT (go_term_id, rfam_model_id) DO UPDATE
SET
    go_term_id = excluded.go_term_id,
    rfam_model_id = excluded.rfam_model_id
;
$$,
$$
drop table load_rfam_go_terms;
$$
;
"""


class RfamPGLoadGoTerms(PGLoader):  # pylint: disable=R0904
    """
    This will run pgloader on the Rfam go term mapping CSV file. The
    importing will update any existing mappings and will not produce
    duplicates.
    """

    def requires(self):
        return [
            RfamGoTermsCSV(),
            PGLoadGoTerms(),
            RfamPGLoadFamilies(),
        ]

    def control_file(self):
        filename = RfamGoTermsCSV().output().fn
        return CONTROL_FILE.format(
            filename=filename,
            db_url=self.db_url(table='load_rfam_go_terms'),
            search_path=self.db_search_path(),
        )
# -*- coding: utf-8 -*-

"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from tasks.utils.pgloader import PGLoader
from tasks.go_terms.pgload_go_terms import PGLoadGoTerms

from .go_term_mapping_csv import RfamGoTermsCSV
from .pgload_families import RfamPGLoadFamilies

CONTROL_FILE = """LOAD CSV
FROM '{filename}'
WITH ENCODING ISO-8859-14
HAVING FIELDS (
    go_term_id,
    rfam_model_id
)
INTO {db_url}
TARGET COLUMNS (
    go_term_id,
    name
)

SET
    search_path = '{search_path}'

WITH
    skip header = 1,
    fields escaped by double-quote,
    fields terminated by ','

BEFORE LOAD DO
$$
create table if not exists load_rfam_go_terms (
    go_term_id character varying(10) COLLATE pg_catalog."default" NOT NULL,
    rfam_model_id character varying(20) COLLATE pg_catalog."default" NOT NULL
);
$$,
$$
truncate table load_rfam_go_terms;
$$

AFTER LOAD DO
$$
insert into rfam_go_terms (
    go_term_id,
    rfam_model_id
) (
select
    go_term_id,
    rfam_model_id
from load_rfam_go_terms
)
ON CONFLICT (go_term_id, rfam_model_id) DO UPDATE
SET
    go_term_id = excluded.go_term_id,
    rfam_model_id = excluded.rfam_model_id
;
$$,
$$
drop table load_rfam_go_terms;
$$
;
"""


class RfamPGLoadGoTerms(PGLoader):  # pylint: disable=R0904
    """
    This will run pgloader on the Rfam go term mapping CSV file. The
    importing will update any existing mappings and will not produce
    duplicates.
    """

    def requires(self):
        return [
            RfamGoTermsCSV(),
            PGLoadGoTerms(),
            RfamPGLoadFamilies(),
        ]

    def control_file(self):
        filename = RfamGoTermsCSV().output().fn
        return CONTROL_FILE.format(
            filename=filename,
            db_url=self.db_url(table='load_rfam_go_terms'),
            search_path=self.db_search_path(),
        )
apache-2.0
Python
1f3325519a72cb98669185149b03b11c1ec25f70
Fix line number convention
refeed/coala-bears,refeed/coala-bears,refeed/coala-bears,refeed/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,refeed/coala-bears,coala/coala-bears,refeed/coala-bears,refeed/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,refeed/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,coala-analyzer/coala-bears,refeed/coala-bears,coala/coala-bears,coala/coala-bears,refeed/coala-bears,refeed/coala-bears,coala-analyzer/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,coala/coala-bears
bears/c_languages/CPPLintBear.py
bears/c_languages/CPPLintBear.py
import sys

from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.settings.Setting import typed_list


@linter(executable='cpplint',
        normalize_line_numbers=True,
        use_stdout=False,
        use_stderr=True,
        output_format='regex',
        output_regex=r'.+:(?P<line>\d+): (?P<message>.+)')
class CPPLintBear:
    """
    Check C++ code for Google's C++ style guide.

    For more information, consult <https://github.com/theandrewdavis/cpplint>.
    """

    LANGUAGES = {'C++'}
    REQUIREMENTS = {PipRequirement('cpplint', '1.3')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'[email protected]'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Formatting'}

    @staticmethod
    def create_arguments(filename, file, config_file,
                         max_line_length: int = 79,
                         cpplint_ignore: typed_list(str) = (),
                         cpplint_include: typed_list(str) = (),
                         ):
        """
        :param max_line_length:
            Maximum number of characters for a line.
            When set to 0 allows infinite line length.
        :param cpplint_ignore:
            List of checkers to ignore.
        :param cpplint_include:
            List of checkers to explicitly enable.
        """
        if not max_line_length:
            max_line_length = sys.maxsize

        ignore = ','.join('-'+part.strip() for part in cpplint_ignore)
        include = ','.join('+'+part.strip() for part in cpplint_include)
        return ('--filter=' + ignore + ',' + include,
                '--linelength=' + str(max_line_length),
                filename)
import sys

from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.settings.Setting import typed_list


@linter(executable='cpplint',
        use_stdout=False,
        use_stderr=True,
        output_format='regex',
        output_regex=r'.+:(?P<line>\d+): (?P<message>.+)')
class CPPLintBear:
    """
    Check C++ code for Google's C++ style guide.

    For more information, consult <https://github.com/theandrewdavis/cpplint>.
    """

    LANGUAGES = {'C++'}
    REQUIREMENTS = {PipRequirement('cpplint', '1.3')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'[email protected]'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Formatting'}

    @staticmethod
    def create_arguments(filename, file, config_file,
                         max_line_length: int = 79,
                         cpplint_ignore: typed_list(str) = (),
                         cpplint_include: typed_list(str) = (),
                         ):
        """
        :param max_line_length:
            Maximum number of characters for a line.
            When set to 0 allows infinite line length.
        :param cpplint_ignore:
            List of checkers to ignore.
        :param cpplint_include:
            List of checkers to explicitly enable.
        """
        if not max_line_length:
            max_line_length = sys.maxsize

        ignore = ','.join('-'+part.strip() for part in cpplint_ignore)
        include = ','.join('+'+part.strip() for part in cpplint_include)
        return ('--filter=' + ignore + ',' + include,
                '--linelength=' + str(max_line_length),
                filename)
agpl-3.0
Python
df7e5f56fdb2a9bc34a0fdf62b5847ee4183d32e
Update import_gist.py
cosacog/import_gist
lib/import_gist/bin/import_gist.py
lib/import_gist/bin/import_gist.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

def import_gist(url_gist):
    '''
    import custom functions from gist.github.com
    usage:
        mod_name = import_gist(url_gist)
    params:
        url_gist: url of gist. be sure to append '/raw/' to the gist url to load script, not html
        e.g. https://gist.githubusercontent.com/cosacog/67ac95feef8a2a1cd373d43a86fe2c9c/raw/
    '''
    import os,sys, urllib, tempfile
    import urllib.request
    fname_func = 'tmp_func.py' # temporary file name of .py
    tmp_dir = tempfile.mkdtemp()
    # check url_gist
    # append '/' at the end
    if url_gist[-1] is not '/':
        url_gist = url_gist + '/'
    # append 'raw/' at the end
    if url_gist[-5:] != '/raw/':
        url_gist = url_gist + 'raw/'

    urllib.request.urlretrieve(url_gist, filename=os.path.join(tmp_dir,fname_func))
    sys.path.append(tmp_dir)
    import tmp_func as mod_func
    sys.path.remove(tmp_dir)
    return mod_func

if __name__ =='__main__':
    print("I'm sorry. There is no main script.")
#!/usr/bin/env python
# -*- coding: utf-8 -*-

def import_gist(url_gist):
    '''
    import custom functions from gist.github.com
    usage:
        mod_name = import_gist(url_gist)
    params:
        url_gist: url of gist. be sure to append '/raw/' to the gist url to load script, not html
        e.g. https://gist.githubusercontent.com/cosacog/67ac95feef8a2a1cd373d43a86fe2c9c/raw/
    '''
    import os,sys, urllib, tempfile
    fname_func = 'tmp_func.py' # temporary file name of .py
    tmp_dir = tempfile.mkdtemp()
    # check url_gist
    # append '/' at the end
    if url_gist[-1] is not '/':
        url_gist = url_gist + '/'
    # append 'raw/' at the end
    if url_gist[-5:] != '/raw/':
        url_gist = url_gist + 'raw/'

    urllib.request.urlretrieve(url_gist, filename=os.path.join(tmp_dir,fname_func))
    sys.path.append(tmp_dir)
    import tmp_func as mod_func
    sys.path.remove(tmp_dir)
    return mod_func

if __name__ =='__main__':
    print("I'm sorry. There is no main script.")
mit
Python
7ef1afc579c62fa0c713d8db0bf17eb09b498a0b
Add unittest for random_integers
jnishi/chainer,jnishi/chainer,kikusu/chainer,keisuke-umezawa/chainer,wkentaro/chainer,niboshi/chainer,okuta/chainer,AlpacaDB/chainer,tkerola/chainer,niboshi/chainer,hvy/chainer,hvy/chainer,truongdq/chainer,cemoody/chainer,aonotas/chainer,sinhrks/chainer,muupan/chainer,ktnyt/chainer,cupy/cupy,kashif/chainer,chainer/chainer,hvy/chainer,AlpacaDB/chainer,ysekky/chainer,benob/chainer,ronekko/chainer,rezoo/chainer,kiyukuta/chainer,t-abe/chainer,niboshi/chainer,chainer/chainer,cupy/cupy,okuta/chainer,niboshi/chainer,wkentaro/chainer,chainer/chainer,truongdq/chainer,benob/chainer,keisuke-umezawa/chainer,kikusu/chainer,ktnyt/chainer,pfnet/chainer,jnishi/chainer,wkentaro/chainer,t-abe/chainer,chainer/chainer,okuta/chainer,wkentaro/chainer,cupy/cupy,ktnyt/chainer,anaruse/chainer,okuta/chainer,hvy/chainer,jnishi/chainer,keisuke-umezawa/chainer,sinhrks/chainer,keisuke-umezawa/chainer,cupy/cupy,delta2323/chainer,ktnyt/chainer,muupan/chainer
tests/cupy_tests/random_tests/test_sample.py
tests/cupy_tests/random_tests/test_sample.py
import mock
import unittest

from cupy import cuda
from cupy import testing
from cupy import random


@testing.gpu
class TestRandint(unittest.TestCase):

    _multiprocess_can_split_ = True

    def setUp(self):
        device_id = cuda.Device().id
        self.m = mock.Mock()
        self.m.interval.return_value = 0
        random.generator._random_states = {device_id : self.m}

    def test_value_error(self):
        with self.assertRaises(ValueError):
            random.randint(100, 1)

    def test_high_and_size_are_none(self):
        random.randint(3)
        self.m.interval.assert_called_with(3, None)

    def test_size_is_none(self):
        random.randint(3, 5)
        self.m.interval.assert_called_with(2, None)

    def test_high_is_none(self):
        random.randint(3, None, (1, 2, 3))
        self.m.interval.assert_called_with(3, (1, 2, 3))

    def test_no_none(self):
        random.randint(3, 5, (1, 2, 3))
        self.m.interval.assert_called_with(2, (1, 2, 3))


@testing.gpu
class TestRandomIntegers(unittest.TestCase):

    _multiprocess_can_split_ = True

    def setUp(self):
        random.sample_.randint = mock.Mock()

    def test_normal(self):
        random.random_integers(3, 5)
        random.sample_.randint.assert_called_with(3, 6, None)

    def test_high_is_none(self):
        random.random_integers(3, None)
        random.sample_.randint.assert_called_with(1, 4, None)

    def test_size_is_not_none(self):
        random.random_integers(3, 5, (1, 2, 3))
        random.sample_.randint.assert_called_with(3, 6, (1, 2, 3))
import mock
import unittest

from cupy import cuda
from cupy import testing
from cupy import random


@testing.gpu
class TestRandint(unittest.TestCase):

    _multiprocess_can_split_ = True

    def setUp(self):
        device_id = cuda.Device().id
        self.m = mock.Mock()
        self.m.interval.return_value = 0
        random.generator._random_states = {device_id : self.m}

    def test_value_error(self):
        with self.assertRaises(ValueError):
            random.randint(100, 1)

    def test_high_and_size_are_none(self):
        random.randint(3)
        self.m.interval.assert_called_with(3, None)

    def test_size_is_none(self):
        random.randint(3, 5)
        self.m.interval.assert_called_with(2, None)

    def test_high_is_none(self):
        random.randint(3, None, (1, 2, 3))
        self.m.interval.assert_called_with(3, (1, 2, 3))

    def test_no_none(self):
        random.randint(3, 5, (1, 2, 3))
        self.m.interval.assert_called_with(2, (1, 2, 3))
mit
Python
5aa2a3e4b724784bbedaa5a436893e5ce28f7c45
Bump version to 0.2.3
edoburu/fluentcms-emailtemplates,edoburu/fluentcms-emailtemplates
fluentcms_emailtemplates/__init__.py
fluentcms_emailtemplates/__init__.py
# following PEP 440
__version__ = "0.2.3"
# following PEP 440
__version__ = "0.2.2"
apache-2.0
Python
64ae848095215715ea7448c517011d64403dee85
Remove useless import
makinacorpus/Geotrek,makinacorpus/Geotrek,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin
geotrek/api/mobile/views/trekking.py
geotrek/api/mobile/views/trekking.py
from __future__ import unicode_literals

from django.conf import settings
from django.db.models import F

from geotrek.api.mobile.serializers import trekking as api_serializers
from geotrek.api.mobile import viewsets as api_viewsets
from geotrek.api.v2.functions import Transform, Length, StartPoint
from geotrek.trekking import models as trekking_models


class TrekViewSet(api_viewsets.GeotrekViewset):
    serializer_class = api_serializers.TrekListSerializer
    serializer_detail_class = api_serializers.TrekDetailSerializer
    filter_fields = ('difficulty', 'themes', 'networks', 'practice')

    def get_queryset(self, *args, **kwargs):
        queryset = trekking_models.Trek.objects.existing()\
            .select_related('topo_object', 'difficulty', 'practice') \
            .prefetch_related('topo_object__aggregations', 'themes', 'networks',
                              'attachments', 'information_desks') \
            .order_by('pk').annotate(length_2d_m=Length('geom'))
        if self.action == 'list':
            queryset = queryset.annotate(start_point=Transform(StartPoint('geom'), settings.API_SRID))
        else:
            queryset = queryset.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID))
        return queryset


class POIViewSet(api_viewsets.GeotrekViewset):
    serializer_class = api_serializers.POIListSerializer
    serializer_detail_class = api_serializers.POIListSerializer
    queryset = trekking_models.POI.objects.existing() \
        .select_related('topo_object', 'type', ) \
        .prefetch_related('topo_object__aggregations', 'attachments') \
        .annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID),
                  geom3d_transformed=Transform(F('geom_3d'), settings.API_SRID)) \
        .order_by('pk')  # Required for reliable pagination
    filter_fields = ('type',)
from __future__ import unicode_literals

from django.conf import settings
from django.db.models import F

from rest_framework_extensions.mixins import DetailSerializerMixin

from geotrek.api.mobile.serializers import trekking as api_serializers
from geotrek.api.mobile import viewsets as api_viewsets
from geotrek.api.v2.functions import Transform, Length, StartPoint
from geotrek.trekking import models as trekking_models


class TrekViewSet(api_viewsets.GeotrekViewset):
    serializer_class = api_serializers.TrekListSerializer
    serializer_detail_class = api_serializers.TrekDetailSerializer
    filter_fields = ('difficulty', 'themes', 'networks', 'practice')

    def get_queryset(self, *args, **kwargs):
        queryset = trekking_models.Trek.objects.existing()\
            .select_related('topo_object', 'difficulty', 'practice') \
            .prefetch_related('topo_object__aggregations', 'themes', 'networks',
                              'attachments', 'information_desks') \
            .order_by('pk').annotate(length_2d_m=Length('geom'))
        if self.action == 'list':
            queryset = queryset.annotate(start_point=Transform(StartPoint('geom'), settings.API_SRID))
        else:
            queryset = queryset.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID))
        return queryset


class POIViewSet(api_viewsets.GeotrekViewset):
    serializer_class = api_serializers.POIListSerializer
    serializer_detail_class = api_serializers.POIListSerializer
    queryset = trekking_models.POI.objects.existing() \
        .select_related('topo_object', 'type', ) \
        .prefetch_related('topo_object__aggregations', 'attachments') \
        .annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID),
                  geom3d_transformed=Transform(F('geom_3d'), settings.API_SRID)) \
        .order_by('pk')  # Required for reliable pagination
    filter_fields = ('type',)
bsd-2-clause
Python
6105e355cf0275e00f284ac6658454905a9b9a07
change import of tfpark
intel-analytics/BigDL,intel-analytics/BigDL,yangw1234/BigDL,intel-analytics/BigDL,yangw1234/BigDL,yangw1234/BigDL,yangw1234/BigDL,intel-analytics/BigDL
python/chronos/src/bigdl/chronos/forecaster/tfpark_forecaster.py
python/chronos/src/bigdl/chronos/forecaster/tfpark_forecaster.py
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from abc import ABCMeta, abstractmethod

from bigdl.orca.tfpark import KerasModel as TFParkKerasModel
import tensorflow as tf

from bigdl.chronos.forecaster.abstract import Forecaster


class TFParkForecaster(TFParkKerasModel, Forecaster, metaclass=ABCMeta):
    """
    Base class for TFPark KerasModel based Forecast models.
    """

    def __init__(self):
        """
        Build a tf.keras model.
        Turns the tf.keras model returned from _build into a tfpark.KerasModel
        """
        self.model = self._build()
        assert (isinstance(self.model, tf.keras.Model))
        super().__init__(self.model)

    @abstractmethod
    def _build(self):
        """
        Build a tf.keras model.

        :return: a tf.keras model (compiled)
        """
        pass
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from abc import ABCMeta, abstractmethod

from zoo.tfpark import KerasModel as TFParkKerasModel
import tensorflow as tf

from bigdl.chronos.forecaster.abstract import Forecaster


class TFParkForecaster(TFParkKerasModel, Forecaster, metaclass=ABCMeta):
    """
    Base class for TFPark KerasModel based Forecast models.
    """

    def __init__(self):
        """
        Build a tf.keras model.
        Turns the tf.keras model returned from _build into a tfpark.KerasModel
        """
        self.model = self._build()
        assert (isinstance(self.model, tf.keras.Model))
        super().__init__(self.model)

    @abstractmethod
    def _build(self):
        """
        Build a tf.keras model.

        :return: a tf.keras model (compiled)
        """
        pass
apache-2.0
Python
60ecc08395eb266f09aa8587bf38aceb59a2b968
Update Scramble_String.py
UmassJin/Leetcode
Array/Scramble_String.py
Array/Scramble_String.py
""" Given a string s1, we may represent it as a binary tree by partitioning it to two non-empty substrings recursively. Below is one possible representation of s1 = "great": great / \ gr eat / \ / \ g r e at / \ a t To scramble the string, we may choose any non-leaf node and swap its two children. For example, if we choose the node "gr" and swap its two children, it produces a scrambled string "rgeat". rgeat / \ rg eat / \ / \ r g e at / \ a t We say that "rgeat" is a scrambled string of "great". Similarly, if we continue to swap the children of nodes "eat" and "at", it produces a scrambled string "rgtae". rgtae / \ rg tae / \ / \ r g ta e / \ t a We say that "rgtae" is a scrambled string of "great". Given two strings s1 and s2 of the same length, determine if s2 is a scrambled string of s1. """ class Solution: # @return a boolean def isScramble(self, s1, s2): if len(s1) != len(s2): return False if s1 == s2: return True length = len(list(s1)) if sorted(s1) != sorted(s2): return False for i in xrange(1,length): if self.isScramble(s1[:i],s2[:i]) and self.isScramble(s1[i:],s2[i:]): return True if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:],s2[:-i]): return True return False # Note: # Condition: 1) length_s1 != length_s2 # 2) s1 == s2, s1与s2完全相等 # 3) sorted(s1) 与 sorted(s2)是不是相等 # 4) 比较s1[:i] s2[:i] and s1[i:],s2[i:] # 5) 比较s1[:i], s2[length_s2-i:] and s1[i:],s2[length_s2:-i]
Given a string s1, we may represent it as a binary tree by partitioning it to two non-empty substrings recursively.

Below is one possible representation of s1 = "great":

    great
   /    \
  gr    eat
 / \    /  \
g   r  e   at
           / \
          a   t

To scramble the string, we may choose any non-leaf node and swap its two children.

For example, if we choose the node "gr" and swap its two children, it produces a scrambled string "rgeat".

    rgeat
   /    \
  rg    eat
 / \    /  \
r   g  e   at
           / \
          a   t

We say that "rgeat" is a scrambled string of "great".

Similarly, if we continue to swap the children of nodes "eat" and "at", it produces a scrambled string "rgtae".

    rgtae
   /    \
  rg    tae
 / \    /  \
r   g  ta  e
       / \
      t   a

We say that "rgtae" is a scrambled string of "great".

Given two strings s1 and s2 of the same length, determine if s2 is a scrambled string of s1.

class Solution:
    # @return a boolean
    def isScramble(self, s1, s2):
        if len(s1) != len(s2):
            return False
        if s1 == s2:
            return True
        length = len(list(s1))
        if sorted(s1) != sorted(s2):
            return False
        for i in xrange(1,length):
            if self.isScramble(s1[:i],s2[:i]) and self.isScramble(s1[i:],s2[i:]):
                return True
            if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:],s2[:-i]):
                return True
        return False

# Note:
# Condition: 1) length_s1 != length_s2
#            2) s1 == s2, s1 and s2 are exactly equal
#            3) whether sorted(s1) and sorted(s2) are equal
#            4) compare s1[:i] s2[:i] and s1[i:],s2[i:]
#            5) compare s1[:i], s2[length_s2-i:] and s1[i:],s2[length_s2:-i]
mit
Python
37e8452ad999f42746be395d193a306f9a893dbf
Update rpc.py
vnitinv/ncclient,joysboy/ncclient,earies/ncclient,einarnn/ncclient,katharh/ncclient,sebastianw/ncclient,cmoberg/ncclient,nwautomator/ncclient,leopoul/ncclient,OpenClovis/ncclient,kroustou/ncclient,nnakamot/ncclient,GIC-de/ncclient,lightlu/ncclient,ncclient/ncclient,aitorhh/ncclient
ncclient/operations/third_party/juniper/rpc.py
ncclient/operations/third_party/juniper/rpc.py
from ncclient.xml_ import *

from ncclient.operations.rpc import RPC
from ncclient.operations.rpc import RPCReply
from ncclient.operations.rpc import RPCError


class GetConfiguration(RPC):
    def request(self, format='xml', filter=None):
        node = new_ele('get-configuration', {'format':format})
        if filter is not None:
            node.append(filter)
        return self._request(node)


class LoadConfiguration(RPC):
    def request(self, format='xml', action='merge', target='candidate', config=None):
        if config is not None:
            if type(config) == list:
                config = '\n'.join(config)
            if action == 'set':
                format = 'text'
            node = new_ele('load-configuration', {'action':action, 'format':format})
            if format == 'xml':
                config_node = sub_ele(node, 'configuration')
                config_node.append(config)
            if format == 'text' and not action == 'set':
                config_node = sub_ele(node, 'configuration-text').text = config
            if action == 'set' and format == 'text':
                config_node = sub_ele(node, 'configuration-set').text = config
            return self._request(node)


class CompareConfiguration(RPC):
    def request(self, rollback=0):
        node = new_ele('get-configuration', {'compare':'rollback', 'rollback':str(rollback)})
        return self._request(node)


class ExecuteRpc(RPC):
    def request(self, rpc):
        if isinstance(rpc, str):
            rpc = to_ele(rpc)
        return self._request(rpc)


class Command(RPC):
    def request(self, command=None, format='xml'):
        node = new_ele('command', {'format':format})
        node.text = command
        return self._request(node)


class Reboot(RPC):
    def request(self):
        node = new_ele('request-reboot')
        return self._request(node)


class Halt(RPC):
    def request(self):
        node = new_ele('request-halt')
        return self._request(node)
from ncclient.xml_ import *

from ncclient.operations.rpc import RPC
from ncclient.operations.rpc import RPCReply
from ncclient.operations.rpc import RPCError


class GetConfiguration(RPC):
    def request(self, format='xml', filter=None):
        node = new_ele('get-configuration', {'format':format})
        if filter is not None:
            node.append(filter)
        return self._request(node)


class LoadConfiguration(RPC):
    def request(self, format='xml', action='merge', target='candidate', config=None):
        if config is not None:
            if type(config) == list:
                config = '\n'.join(config)
            if action == 'set':
                format = 'text'
            node = new_ele('load-configuration', {'action':action, 'format':format})
            if format == 'xml':
                config_node = sub_ele(node, 'configuration')
                config_node.append(config)
            if format == 'text' and not action == 'set':
                config_node = sub_ele(node, 'configuration-text').text = config
            if action == 'set' and format == 'text':
                config_node = sub_ele(node, 'configuration-set').text = config
            print to_xml(node)
            return self._request(node)


class CompareConfiguration(RPC):
    def request(self, rollback=0):
        node = new_ele('get-configuration', {'compare':'rollback', 'rollback':str(rollback)})
        return self._request(node)


class ExecuteRpc(RPC):
    def request(self, rpc):
        if isinstance(rpc, str):
            rpc = to_ele(rpc)
        return self._request(rpc)


class Command(RPC):
    def request(self, command=None, format='xml'):
        node = new_ele('command', {'format':format})
        node.text = command
        return self._request(node)


class Reboot(RPC):
    def request(self):
        node = new_ele('request-reboot')
        return self._request(node)


class Halt(RPC):
    def request(self):
        node = new_ele('request-halt')
        return self._request(node)
apache-2.0
Python
3c33b9d7ea3736329d3e0939b042db08e6365eb5
Move experiments to ``experiments`` module
jcpeterson/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger
dallinger/version.py
dallinger/version.py
"""Dallinger version number.""" __version__ = "2.7.0"
"""Dallinger version number.""" __version__ = "3.0.0a1"
mit
Python
0516ca2a5bfaa162a44f407c13b55ca9487897fe
refresh group/keywords every hour
mctenthij/hortiradar,mctenthij/hortiradar,mctenthij/hortiradar,mctenthij/hortiradar,mctenthij/hortiradar
hortiradar/database/tasks_workers.py
hortiradar/database/tasks_workers.py
from configparser import ConfigParser
from time import time

from redis import StrictRedis
import ujson as json

from keywords import get_frog, get_keywords
from selderij import app
from tasks_master import insert_tweet


keywords = get_keywords()
keywords_sync_time = time()

config = ConfigParser()
config.read("tasks_workers.ini")
posprob_minimum = config["workers"].getfloat("posprob_minimum")

redis = StrictRedis()
rt_cache_time = 60 * 60 * 6


@app.task
def find_keywords_and_groups(id_str, text, retweet_id_str):
    """Find the keywords and associated groups in the tweet."""
    global keywords, keywords_sync_time
    if (time() - keywords_sync_time) > 60 * 60:
        keywords = get_keywords()
        keywords_sync_time = time()

    # First check if retweets are already processed in the cache
    if retweet_id_str:
        key = "t:%s" % retweet_id_str
        rt = redis.get(key)
        if rt:
            kw, groups, tokens = json.loads(rt)
            insert_tweet.apply_async((id_str, kw, groups, tokens), queue="master")
            redis.expire(key, rt_cache_time)
            return

    frog = get_frog()
    tokens = frog.process(text)  # a list of dictionaries with frog's analysis per token

    kw = []
    groups = []
    for t in tokens:
        lemma = t["lemma"].lower()
        k = keywords.get(lemma, None)
        if k is not None:
            if t["posprob"] > posprob_minimum:
                if not t["pos"].startswith(k.pos + "("):
                    continue
                kw.append(lemma)
                groups += k.groups

    kw, groups = list(set(kw)), list(set(groups))
    insert_tweet.apply_async((id_str, kw, groups, tokens), queue="master")

    # put retweets in the cache
    if retweet_id_str:
        data = [kw, groups, tokens]
        redis.set(key, json.dumps(data), ex=rt_cache_time)
from configparser import ConfigParser

from redis import StrictRedis
import ujson as json

from keywords import get_frog, get_keywords
from selderij import app
from tasks_master import insert_tweet


keywords = get_keywords()

config = ConfigParser()
config.read("tasks_workers.ini")
posprob_minimum = config["workers"].getfloat("posprob_minimum")

redis = StrictRedis()
rt_cache_time = 60 * 60 * 6


@app.task
def find_keywords_and_groups(id_str, text, retweet_id_str):
    """Find the keywords and associated groups in the tweet."""
    # First check if retweets are already processed in the cache
    if retweet_id_str:
        key = "t:%s" % retweet_id_str
        rt = redis.get(key)
        if rt:
            kw, groups, tokens = json.loads(rt)
            insert_tweet.apply_async((id_str, kw, groups, tokens), queue="master")
            redis.expire(key, rt_cache_time)
            return

    frog = get_frog()
    tokens = frog.process(text)  # a list of dictionaries with frog's analysis per token

    kw = []
    groups = []
    for t in tokens:
        lemma = t["lemma"].lower()
        k = keywords.get(lemma, None)
        if k is not None:
            if t["posprob"] > posprob_minimum:
                if not t["pos"].startswith(k.pos + "("):
                    continue
                kw.append(lemma)
                groups += k.groups

    kw, groups = list(set(kw)), list(set(groups))
    insert_tweet.apply_async((id_str, kw, groups, tokens), queue="master")

    # put retweets in the cache
    if retweet_id_str:
        data = [kw, groups, tokens]
        redis.set(key, json.dumps(data), ex=rt_cache_time)
apache-2.0
Python
993d08b0ca0bcf90af77709e58698b7ecc5ba6b5
Update log.py
tomturner/django-tenants,tomturner/django-tenants,tomturner/django-tenants
django_tenants/log.py
django_tenants/log.py
import logging

from django.db import connection


class TenantContextFilter(logging.Filter):
    """
    Add the current ``schema_name`` and ``domain_url`` to log records.

    Thanks to @regolith for the snippet on
    https://github.com/bernardopires/django-tenant-schemas/issues/248
    """
    def filter(self, record):
        record.schema_name = connection.tenant.schema_name
        record.domain_url = getattr(connection.tenant, 'domain_url', None)
        return True
import logging

from django.db import connection


class TenantContextFilter(logging.Filter):
    """
    Add the current ``schema_name`` and ``domain_url`` to log records.

    Thanks to @regolith for the snippet on
    https://github.com/bernardopires/django-tenant-schemas/issues/248
    """
    def filter(self, record):
        record.schema_name = connection.tenant.schema_name
        record.domain_url = getattr(connection.tenant, 'domain_url', 'none')
        return True
mit
Python
2756326b134acc6c343be8458870121baed963cb
fix db url
cekk/pergamena,cekk/pergamena,cekk/pergamena
pergamena/settings.py
pergamena/settings.py
# -*- coding: utf-8 -*-
import os

os_env = os.environ


class Config(object):
    SECRET_KEY = os_env.get('PERGAMENA_SECRET', 'secret-key')  # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    BCRYPT_LOG_ROUNDS = 13
    ASSETS_DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.


class ProdConfig(Config):
    """Production configuration."""
    ENV = 'prod'
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = os_env.get('DATABASE_URL')
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar


class DevConfig(Config):
    """Development configuration."""
    ENV = 'dev'
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/pergamena_db'  # TODO: Change me
    DEBUG_TB_ENABLED = True
    ASSETS_DEBUG = True  # Don't bundle/minify static assets
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.


class TestConfig(Config):
    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
    BCRYPT_LOG_ROUNDS = 1  # For faster tests
    WTF_CSRF_ENABLED = False  # Allows form testing
# -*- coding: utf-8 -*-
import os

os_env = os.environ


class Config(object):
    SECRET_KEY = os_env.get('PERGAMENA_SECRET', 'secret-key')  # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    BCRYPT_LOG_ROUNDS = 13
    ASSETS_DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.


class ProdConfig(Config):
    """Production configuration."""
    ENV = 'prod'
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/pergamena_db'  # TODO: Change me
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar


class DevConfig(Config):
    """Development configuration."""
    ENV = 'dev'
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/pergamena_db'  # TODO: Change me
    DEBUG_TB_ENABLED = True
    ASSETS_DEBUG = True  # Don't bundle/minify static assets
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.


class TestConfig(Config):
    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
    BCRYPT_LOG_ROUNDS = 1  # For faster tests
    WTF_CSRF_ENABLED = False  # Allows form testing
bsd-3-clause
Python
b7c531220fe7a46ad56eeeb160effe94510ba4b0
Use handler registration in listener
beezz/pg_bawler,beezz/pg_bawler
pg_bawler/listener.py
pg_bawler/listener.py
#!/usr/bin/env python
'''
Listen on given channel for notification.

$ python -m pg_bawler.listener mychannel

If you installed notification trigger with ``pg_bawler.gen_sql`` then
channel is the same as ``tablename`` argument.
'''
import argparse
import asyncio
import importlib
import logging
import sys

import pg_bawler.core

LOGGER = logging.getLogger('pg_bawler.listener')


class DefaultHandler:

    def __init__(self):
        self.count = 0

    async def handle_notification(self, notification):
        self.count += 1
        notification_number = self.count
        LOGGER.info(
            'Received notification #%s pid %s from channel %s: %s',
            notification_number, notification.pid,
            notification.channel, notification.payload)


def get_default_cli_args_parser():
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--dsn', metavar='DSN',
        help='Connection string. e.g. `dbname=test user=postgres`')
    parser.add_argument(
        '--handler', metavar='HANDLER',
        default='pg_bawler.listener:default_handler',
        help=(
            'Module and name of python callable.'
            ' e.g. `pg_bawler.listener:default_handler`'))
    parser.add_argument(
        'channel', metavar='CHANNEL', type=str,
        help='Name of Notify/Listen channel to listen on.')
    return parser


def resolve_handler(handler_str):
    module_name, callable_name = handler_str.split(':')
    return getattr(importlib.import_module(module_name), callable_name)


default_handler = DefaultHandler().handle_notification


class NotificationListener(
    pg_bawler.core.BawlerBase,
    pg_bawler.core.ListenerMixin
):
    pass


def main():
    args = get_default_cli_args_parser().parse_args()
    logging.basicConfig(
        format='[%(asctime)s][%(name)s][%(levelname)s]: %(message)s',
        level=logging.DEBUG)
    LOGGER.info('Starting pg_bawler listener for channel: %s', args.channel)
    loop = asyncio.get_event_loop()
    listener = NotificationListener(connection_params={'dsn': args.dsn})
    listener.listen_timeout = 5
    listener.register_handler(resolve_handler(args.handler))
    loop.run_until_complete(listener.register_channel(args.channel))
    loop.run_until_complete(listener.listen())


if __name__ == '__main__':
    sys.exit(main())
#!/usr/bin/env python
'''
Listen on given channel for notification.

$ python -m pg_bawler.listener mychannel

If you installed notification trigger with ``pg_bawler.gen_sql`` then
channel is the same as ``tablename`` argument.
'''
import argparse
import asyncio
import importlib
import logging
import sys

import pg_bawler.core

LOGGER = logging.getLogger('pg_bawler.listener')


class DefaultHandler:

    def __init__(self):
        self.count = 0

    async def handle_notification(self, notification):
        self.count += 1
        notification_number = self.count
        LOGGER.info(
            'Received notification #%s pid %s from channel %s: %s',
            notification_number, notification.pid,
            notification.channel, notification.payload)


def get_default_cli_args_parser():
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--dsn', metavar='DSN',
        help='Connection string. e.g. `dbname=test user=postgres`')
    parser.add_argument(
        '--handler', metavar='HANDLER',
        default='pg_bawler.listener:default_handler',
        help=(
            'Module and name of python callable.'
            ' e.g. `pg_bawler.listener:default_handler`'))
    parser.add_argument(
        'channel', metavar='CHANNEL', type=str,
        help='Name of Notify/Listen channel to listen on.')
    return parser


def resolve_handler(handler_str):
    module_name, callable_name = handler_str.split(':')
    return getattr(importlib.import_module(module_name), callable_name)


default_handler = DefaultHandler().handle_notification


class NotificationListener(
    pg_bawler.core.BawlerBase,
    pg_bawler.core.ListenerMixin
):
    pass


def main():
    args = get_default_cli_args_parser().parse_args()
    logging.basicConfig(
        format='[%(asctime)s][%(name)s][%(levelname)s]: %(message)s',
        level=logging.DEBUG)
    LOGGER.info('Starting pg_bawler listener for channel: %s', args.channel)
    loop = asyncio.get_event_loop()
    listener = NotificationListener(connection_params={'dsn': args.dsn})
    listener.listen_timeout = 5
    listener.handler = resolve_handler(args.handler)
    loop.run_until_complete(listener.register_channel(args.channel))
    loop.run_until_complete(listener.listen())


if __name__ == '__main__':
    sys.exit(main())
bsd-3-clause
Python
5ff6dffeaf757e360a42e22a9df6d74345a4f418
Fix panda part imports
dls-controls/pymalcolm,dls-controls/pymalcolm,dls-controls/pymalcolm
malcolm/parts/pandabox/__init__.py
malcolm/parts/pandabox/__init__.py
# Find all subpackages, MethodMeta decorated callables, and YAML files
from malcolm.packageutil import prepare_package

__all__ = prepare_package(globals(), __name__)

del prepare_package
# Don't import all the parts as they need to be created from
# includes.pandabox.hardware_collection()
from malcolm.parts.pandabox.pandaboxdriverpart import PandABoxDriverPart
apache-2.0
Python
d2fdf0d91f41350347ba460e33cc04aa1e59eb96
Call the run script from the analysis driver
mdpiper/dakota-swash-parameter-study,mdpiper/dakota-swash-parameter-study
analysis_driver.py
analysis_driver.py
#! /usr/bin/env python
# Brokers communication between Dakota and SWASH through files.
#
# Arguments:
#   $1 is 'params.in' from Dakota
#   $2 is 'results.out' returned to Dakota

import sys
import os
import re
import shutil
from subprocess import call
import numpy as np


def read(output_file, variable=None):
    """Read data from a MATfile. Returns a numpy array, or None on an error."""
    from scipy.io import loadmat
    try:
        mat = loadmat(output_file)
        var = mat[variable]
    except IOError:
        return None
    else:
        return(var)


def write(results_file, array, labels):
    """Write a Dakota results file from an input array."""
    try:
        fp = open(results_file, 'w')
        for i in range(len(array)):
            fp.write(str(array[i]) + '\t' + labels[i] + '\n')
    except IOError:
        raise
    finally:
        fp.close()


def get_labels(params_file):
    """Extract labels from a Dakota parameters file."""
    labels = []
    try:
        fp = open(params_file, 'r')
        for line in fp:
            if re.search('ASV_', line):
                labels.append(''.join(re.findall(':(\S+)', line)))
    except IOError:
        raise
    finally:
        fp.close()
    return(labels)


if __name__ == '__main__':

    # Files and directories.
    start_dir = os.path.dirname(os.path.realpath(__file__))
    input_template = 'INPUT.template'
    input_file = 'INPUT'
    output_file = 'bot07.mat'
    output_file_var = 'Botlev'
    run_script = 'run_swash.sh'

    # Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
    # incorporate the parameters from Dakota into the SWASH input
    # template, creating a new SWASH input file.
    shutil.copy(os.path.join(start_dir, input_template), os.curdir)
    call(['dprepro', sys.argv[1], input_template, input_file])

    # Call SWASH with a script containing PBS commands.
    job_name = 'SWASH-Dakota' + os.path.splitext(os.getcwd())[1]
    call(['qsub', '-N', job_name, run_script])

    # Calculate the mean and standard deviation of the 'Botlev' output
    # values for the simulation. Write the output to a Dakota results
    # file.
    labels = get_labels(sys.argv[1])
    series = read(output_file, output_file_var)
    if series is not None:
        m_series = [np.mean(series), np.std(series)]
    else:
        m_series = [0, 0]
    write(sys.argv[2], m_series, labels)
#! /usr/bin/env python
# Brokers communication between Dakota and SWASH through files.
#
# Arguments:
#   $1 is 'params.in' from Dakota
#   $2 is 'results.out' returned to Dakota

import sys
import os
import re
import shutil
from subprocess import call
import numpy as np


def read(output_file, variable=None):
    """Read data from a MATfile. Returns a numpy array, or None on an error."""
    from scipy.io import loadmat
    try:
        mat = loadmat(output_file)
        var = mat[variable]
    except IOError:
        return None
    else:
        return(var)


def write(results_file, array, labels):
    """Write a Dakota results file from an input array."""
    try:
        fp = open(results_file, 'w')
        for i in range(len(array)):
            fp.write(str(array[i]) + '\t' + labels[i] + '\n')
    except IOError:
        raise
    finally:
        fp.close()


def get_labels(params_file):
    """Extract labels from a Dakota parameters file."""
    labels = []
    try:
        fp = open(params_file, 'r')
        for line in fp:
            if re.search('ASV_', line):
                labels.append(''.join(re.findall(':(\S+)', line)))
    except IOError:
        raise
    finally:
        fp.close()
    return(labels)


if __name__ == '__main__':

    # Files and directories.
    start_dir = os.path.dirname(os.path.realpath(__file__))
    input_template = 'INPUT.template'
    input_file = 'INPUT'
    output_file = 'bot07.mat'
    output_file_var = 'Botlev'

    # Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
    # incorporate the parameters from Dakota into the SWASH input
    # template, creating a new SWASH input file.
    shutil.copy(os.path.join(start_dir, input_template), os.curdir)
    call(['dprepro', sys.argv[1], input_template, input_file])

    # Call SWASH with the new input file.
    call(['swash_mpi.exe', input_file])

    # Calculate the mean and standard deviation of the 'Botlev' output
    # values for the simulation. Write the output to a Dakota results
    # file.
    labels = get_labels(sys.argv[1])
    series = read(output_file, output_file_var)
    if series is not None:
        m_series = [np.mean(series), np.std(series)]
    else:
        m_series = [0, 0]
    write(sys.argv[2], m_series, labels)
mit
Python
9e95522c847b12a19cff54737a44f569fe2cf6b7
Add method for getting Candidacy.party_name
opencivicdata/python-opencivicdata-django,opencivicdata/python-opencivicdata,opencivicdata/python-opencivicdata,opencivicdata/python-opencivicdata-django,opencivicdata/python-opencivicdata-django
opencivicdata/elections/admin/candidacy.py
opencivicdata/elections/admin/candidacy.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Custom administration panels for Candidacy-related models.
"""
from django import VERSION as django_version
from django.contrib import admin
from opencivicdata.core.admin import base
from .. import models


class CandidacySourceInline(base.LinkInline):
    """
    Custom inline administrative panel for the CandidacySource model.
    """
    model = models.CandidacySource


@admin.register(models.Candidacy)
class CandidacyAdmin(base.ModelAdmin):
    """
    Custom inline administrative panel for the Candidacy model.
    """
    raw_id_fields = (
        'person',
        'contest',
        'top_ticket_candidacy',
    )
    fields = (
        'candidate_name',
        'post',
        'filed_date',
        'is_incumbent',
        'registration_status',
        'party',
    ) + raw_id_fields
    list_display = (
        'candidate_name',
        'contest',
        'is_incumbent',
        'registration_status',
        'id',
        'party_name',
        'updated_at',
    )
    search_fields = ('candidate_name', 'contest__name', 'post__label', )
    list_filter = (
        'party__name',
        'is_incumbent',
        'registration_status',
        'updated_at',
    )
    # date_hierarchy across relations was added to django 1.11
    if django_version[0] >= 1 and django_version[1] >= 11:
        date_hierarchy = 'contest__election__date'
    inlines = [
        CandidacySourceInline,
    ]

    def party_name(self, obj):
        """
        Return the name of the Party associated with the Candidacy.
        """
        if obj.party:
            name = obj.party.name
        else:
            name = None
        return name
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Custom administration panels for Candidacy-related models.
"""
from django import VERSION as django_version
from django.contrib import admin
from opencivicdata.core.admin import base
from .. import models


class CandidacySourceInline(base.LinkInline):
    """
    Custom inline administrative panel for the CandidacySource model.
    """
    model = models.CandidacySource


@admin.register(models.Candidacy)
class CandidacyAdmin(base.ModelAdmin):
    """
    Custom inline administrative panel for the Candidacy model.
    """
    raw_id_fields = (
        'person',
        'contest',
        'top_ticket_candidacy',
    )
    fields = (
        'candidate_name',
        'post',
        'filed_date',
        'is_incumbent',
        'registration_status',
        'party',
    ) + raw_id_fields
    list_display = (
        'candidate_name',
        'contest',
        'is_incumbent',
        'registration_status',
        'id',
        'party__name',
        'updated_at',
    )
    search_fields = ('candidate_name', 'contest__name', 'post__label', )
    list_filter = (
        'party__name',
        'is_incumbent',
        'registration_status',
        'updated_at',
    )
    # date_hierarchy across relations was added to django 1.11
    if django_version[0] >= 1 and django_version[1] >= 11:
        date_hierarchy = 'contest__election__start_time'
    inlines = [
        CandidacySourceInline,
    ]
bsd-3-clause
Python
ce266cec800fd921f9b4de82fd9f9666ed2df053
Fix another shit
BrodaNoel/bropy,BrodaNoel/bropy
modules/gy-271/core/get.py
modules/gy-271/core/get.py
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# HMC5883
# This code is designed to work with the HMC5883_I2CS I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Compass?sku=HMC5883_I2CS#tabs-0-product_tabset-2
# Modified by Broda Noel (@BrodaNoel in all social networks)

import smbus
import time
import sys

# Get I2C bus
bus = smbus.SMBus(1)

address = 0x1E

# HMC5883 address, 0x1E(30)
# Select configuration register A, 0x00(00)
# 0x60(96) Normal measurement configuration, Data output rate = 0.75 Hz
bus.write_byte_data(address, 0x00, 0x60)

# HMC5883 address, 0x1E(30)
# Select mode register, 0x02(02)
# 0x00(00) Continuous measurement mode
bus.write_byte_data(address, 0x02, 0x00)

time.sleep(0.5)

# HMC5883 address, 0x1E(30)
# Read data back from 0x03(03), 6 bytes
# X-Axis MSB, X-Axis LSB, Z-Axis MSB, Z-Axis LSB, Y-Axis MSB, Y-Axis LSB
data = bus.read_i2c_block_data(address, 0x03, 6)

# Convert the data
xMag = data[0] * 256 + data[1]
if xMag > 32767 :
    xMag -= 65536

zMag = data[2] * 256 + data[3]
if zMag > 32767 :
    zMag -= 65536

yMag = data[4] * 256 + data[5]
if yMag > 32767 :
    yMag -= 65536

# Output data to screen
sys.stdout.write('{ "x": ' + str(xMag) + ', "y": ' + str(yMag) + ', "z": ' + str(zMag) + ' }')
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# HMC5883
# This code is designed to work with the HMC5883_I2CS I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Compass?sku=HMC5883_I2CS#tabs-0-product_tabset-2
# Modified by Broda Noel (@BrodaNoel in all social networks)

import smbus
import time

# Get I2C bus
bus = smbus.SMBus(1)

address = 0x1E

# HMC5883 address, 0x1E(30)
# Select configuration register A, 0x00(00)
# 0x60(96) Normal measurement configuration, Data output rate = 0.75 Hz
bus.write_byte_data(address, 0x00, 0x60)

# HMC5883 address, 0x1E(30)
# Select mode register, 0x02(02)
# 0x00(00) Continuous measurement mode
bus.write_byte_data(address, 0x02, 0x00)

time.sleep(0.5)

# HMC5883 address, 0x1E(30)
# Read data back from 0x03(03), 6 bytes
# X-Axis MSB, X-Axis LSB, Z-Axis MSB, Z-Axis LSB, Y-Axis MSB, Y-Axis LSB
data = bus.read_i2c_block_data(address, 0x03, 6)

# Convert the data
xMag = data[0] * 256 + data[1]
if xMag > 32767 :
    xMag -= 65536

zMag = data[2] * 256 + data[3]
if zMag > 32767 :
    zMag -= 65536

yMag = data[4] * 256 + data[5]
if yMag > 32767 :
    yMag -= 65536

# Output data to screen
sys.stdout.write('{ "x": ' + str(xMag) + ', "y": ' + str(yMag) + ', "z": ' + str(zMag) + ' }')
mit
Python
4fccaeefd67c3c736861870a8fe711a934c96e6d
Add some documentation
b-mueller/mythril,b-mueller/mythril,b-mueller/mythril,b-mueller/mythril
mythril/laser/ethereum/transaction.py
mythril/laser/ethereum/transaction.py
import logging
from mythril.laser.ethereum.state import GlobalState, Environment, CalldataType
from mythril.laser.ethereum.cfg import Node, Edge, JumpType
from z3 import BitVec


class CallTransaction:
    """ Represents a call value transaction """
    def __init__(self, callee_address):
        """
        Constructor for Call transaction, sets up all symbolic parameters
        :param callee_address: Address of the contract that will be called
        """
        self.callee_address = callee_address
        self.caller = BitVec("caller", 256)
        self.gas_price = BitVec("gasprice", 256)
        self.call_value = BitVec("callvalue", 256)
        self.origin = BitVec("origin", 256)
        pass

    def run(self, open_world_states, evm):
        """ Runs this transaction on the evm starting from the open world states"""
        for open_world_state in open_world_states:
            # Initialize the execution environment
            environment = Environment(
                open_world_state[self.callee_address],
                self.caller,
                [],
                self.gas_price,
                self.call_value,
                self.origin,
                calldata_type=CalldataType.SYMBOLIC,
            )

            new_node = Node(environment.active_account.contract_name)
            evm.instructions_covered = [False for _ in environment.code.instruction_list]

            evm.nodes[new_node.uid] = new_node
            if open_world_state.node:
                evm.edges.append(Edge(open_world_state.node.uid, new_node.uid, edge_type=JumpType.Transaction, condition=None))

            global_state = GlobalState(open_world_state, environment, new_node)
            new_node.states.append(global_state)

            evm.work_list.append(global_state)
            evm.exec()

        logging.info("Execution complete")
        logging.info("Achieved {0:.3g}% coverage".format(evm.coverage))
import logging
from mythril.laser.ethereum.state import GlobalState, Environment, CalldataType
from mythril.laser.ethereum.cfg import Node, Edge, JumpType
from z3 import BitVec


class CallTransaction:
    def __init__(self, callee_address):
        self.callee_address = callee_address
        self.caller = BitVec("caller", 256)
        self.gas_price = BitVec("gasprice", 256)
        self.call_value = BitVec("callvalue", 256)
        self.origin = BitVec("origin", 256)
        pass

    def run(self, open_world_states, evm):
        for open_world_state in open_world_states:
            # Initialize the execution environment
            environment = Environment(
                open_world_state[self.callee_address],
                self.caller,
                [],
                self.gas_price,
                self.call_value,
                self.origin,
                calldata_type=CalldataType.SYMBOLIC,
            )

            new_node = Node(environment.active_account.contract_name)
            evm.instructions_covered = [False for _ in environment.code.instruction_list]

            evm.nodes[new_node.uid] = new_node
            if open_world_state.node:
                evm.edges.append(Edge(open_world_state.node.uid, new_node.uid, edge_type=JumpType.Transaction, condition=None))

            global_state = GlobalState(open_world_state, environment, new_node)
            new_node.states.append(global_state)

            evm.work_list.append(global_state)
            evm.exec()

        logging.info("Execution complete")
        logging.info("Achieved {0:.3g}% coverage".format(evm.coverage))
mit
Python
9f3bf2756debb4534ddcbf538577044e2bae6528
remove unused import
yohanboniface/memopol-core,yohanboniface/memopol-core,yohanboniface/memopol-core
memopol2/search.py
memopol2/search.py
# -*- coding: utf-8 -*- import os os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' import logging from django.db.models import signals from django.conf import settings from whoosh import fields from whoosh.filedb.filestore import FileStorage log = logging.getLogger(__name__) WHOOSH_SCHEMA = fields.Schema(title=fields.TEXT(stored=True), content=fields.TEXT, url=fields.ID(stored=True, unique=True)) def create_index(sender=None, **kwargs): if not os.path.exists(settings.WHOOSH_INDEX): os.mkdir(settings.WHOOSH_INDEX) storage = FileStorage(settings.WHOOSH_INDEX) storage.create_index(WHOOSH_SCHEMA, indexname='memopol') signals.post_syncdb.connect(create_index) def update_index(sender, instance, created, **kwargs): try: url = unicode(instance.get_absolute_url()) except Exception, e: log.critical('Cant resolve url. Content %r not indexed' % instance) return content = getattr(instance, 'content', None) if content is None: content = unicode(instance) elif callable(content): content = content() storage = FileStorage(settings.WHOOSH_INDEX) ix = storage.open_index(indexname='memopol') writer = ix.writer() if created: writer.add_document(title=unicode(instance), content=content, url=url) writer.commit() else: writer.update_document(title=unicode(instance), content=content, url=url) writer.commit() _searchables = [] def searchable(klass): if hasattr(klass, 'get_absolute_url'): signals.post_save.connect(update_index, sender=klass) _searchables.append(klass) if not hasattr(klass, 'content'): log.warn('%s is declared as searchable but has no content attribute' % klass) else: log.warn('%s is declared as searchable but has no get_absolute_url' % klass) return klass def update(): from meps import models from mps import models from reps import models create_index() for klass in _searchables: for i in klass.objects.all(): update_index(None, i, created=False) if __name__ == '__main__': update()
# -*- coding: utf-8 -*- import os os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' import logging from django.db.models import signals from django.conf import settings from whoosh import fields, index from whoosh.filedb.filestore import FileStorage log = logging.getLogger(__name__) WHOOSH_SCHEMA = fields.Schema(title=fields.TEXT(stored=True), content=fields.TEXT, url=fields.ID(stored=True, unique=True)) def create_index(sender=None, **kwargs): if not os.path.exists(settings.WHOOSH_INDEX): os.mkdir(settings.WHOOSH_INDEX) storage = FileStorage(settings.WHOOSH_INDEX) storage.create_index(WHOOSH_SCHEMA, indexname='memopol') signals.post_syncdb.connect(create_index) def update_index(sender, instance, created, **kwargs): try: url = unicode(instance.get_absolute_url()) except Exception, e: log.critical('Cant resolve url. Content %r not indexed' % instance) return content = getattr(instance, 'content', None) if content is None: content = unicode(instance) elif callable(content): content = content() storage = FileStorage(settings.WHOOSH_INDEX) ix = storage.open_index(indexname='memopol') writer = ix.writer() if created: writer.add_document(title=unicode(instance), content=content, url=url) writer.commit() else: writer.update_document(title=unicode(instance), content=content, url=url) writer.commit() _searchables = [] def searchable(klass): if hasattr(klass, 'get_absolute_url'): signals.post_save.connect(update_index, sender=klass) _searchables.append(klass) if not hasattr(klass, 'content'): log.warn('%s is declared as searchable but has no content attribute' % klass) else: log.warn('%s is declared as searchable but has no get_absolute_url' % klass) return klass def update(): from meps import models from mps import models from reps import models create_index() for klass in _searchables: for i in klass.objects.all(): update_index(None, i, created=False) if __name__ == '__main__': update()
agpl-3.0
Python
bac0b5e09fc172a991fb6b7172025c698c1a23d9
Add validation that type is type of Rule into MultipleRulesGrammar
PatrikValkovic/grammpy
grammpy/Grammars/MultipleRulesGrammar.py
grammpy/Grammars/MultipleRulesGrammar.py
#!/usr/bin/env python """ :Author Patrik Valkovic :Created 15.08.2017 14:40 :Licence GNUv3 Part of grammpy """ import inspect from grammpy.exceptions import NotRuleException from .StringGrammar import StringGrammar from ..HashContainer import HashContainer from ..IsMethodsRuleExtension import IsMethodsRuleExtension as Rule, IsMethodsRuleExtension class MultipleRulesGrammar(StringGrammar): def __init__(self, terminals=None, nonterminals=None, rules=None, start_symbol=None): super().__init__(terminals, nonterminals, rules, start_symbol) self._count = 0 def _create_class(self, rule): name = 'SplitRules' + str(self._count) self._count += 1 return type(name, (Rule,), {"rule": rule}) def _transform_rules(self, rules): rules = HashContainer.to_iterable(rules) r = [] for i in rules: if not inspect.isclass(i) or not issubclass(i, IsMethodsRuleExtension): raise NotRuleException(i) if i.is_valid(self) and i.count() > 1: for rule in i.rules: r.append(self._create_class(rule)) else: r.append(i) return r def get_rule(self, rules=None): if rules is None: return super().get_rule() results = super().get_rule(self._transform_rules(rules)) if not HashContainer.is_iterable(rules): return results[0] return results def have_rule(self, rules): return super().have_rule(self._transform_rules(rules)) def remove_rule(self, rules=None): if rules is None: return super().remove_rule() super().remove_rule(self._transform_rules(rules)) def add_rule(self, rules): super().add_rule(self._transform_rules(rules))
#!/usr/bin/env python """ :Author Patrik Valkovic :Created 15.08.2017 14:40 :Licence GNUv3 Part of grammpy """ from .StringGrammar import StringGrammar from ..HashContainer import HashContainer from ..IsMethodsRuleExtension import IsMethodsRuleExtension as Rule class MultipleRulesGrammar(StringGrammar): def __init__(self, terminals=None, nonterminals=None, rules=None, start_symbol=None): super().__init__(terminals, nonterminals, rules, start_symbol) self._count = 0 def _create_class(self, rule): name = 'SplitRules' + str(self._count) self._count += 1 return type(name, (Rule,), {"rule": rule}) def _transform_rules(self, rules): rules = HashContainer.to_iterable(rules) r = [] for i in rules: if i.is_valid(self) and i.count() > 1: for rule in i.rules: r.append(self._create_class(rule)) else: r.append(i) return rules def get_rule(self, rules=None): if rules is None: return super().get_rule() results = super().get_rule(self._transform_rules(rules)) if not HashContainer.is_iterable(rules): return results[0] return results def have_rule(self, rules): return super().have_rule(self._transform_rules(rules)) def remove_rule(self, rules=None): if rules is None: return super().remove_rule() super().remove_rule(self._transform_rules(rules)) def add_rule(self, rules): super().add_rule(self._transform_rules(rules))
mit
Python
6d8b1ea0e459bd3383528fb32e6b1a348b00a9bc
Remove unknown attributes.
kxepal/phoxpy
phoxpy/server/auth.py
phoxpy/server/auth.py
# -*- coding: utf-8 -*- # # Copyright (C) 2011 Alexander Shorin # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # from random import randint from phoxpy import exceptions from phoxpy.messages import PhoxRequest, PhoxResponse from phoxpy.messages import auth from phoxpy.server.main import ServerExtension, request_type __all__ = ['AuthExt'] class AuthExt(ServerExtension): def __init__(self, db): db.update({ 'licenses': set([]), 'users': {}, 'sessions': set([]) }) super(AuthExt, self).__init__(db) def get_session_id(self): return str(randint(10000, 50000)) def add_license(self, key): self.db['licenses'].add(key) def add_user(self, login, password): self.db['users'][login] = password @request_type(auth.AuthRequest) def handle_login(self, request): if request.client_id not in self.db['licenses']: raise exceptions.LicenseNotFound(request.client_id) if request.instance_count is None: raise exceptions.LisBaseException(654) if request.login not in self.db['users']: raise exceptions.UnknownUser() if self.db['users'][request.login] != request.password: raise exceptions.AuthentificationError() sessionid = self.get_session_id() self.db['sessions'].add(sessionid) return auth.AuthResponse( sessionid=sessionid, buildnumber=self.build_number, version=self.server_version ) @request_type(PhoxRequest) def handle_logout(self, request): if request.sessionid not in self.db['sessions']: raise exceptions.UnknownSession() self.db['sessions'].remove(request.sessionid) return PhoxResponse(sessionid=request.sessionid)
# -*- coding: utf-8 -*- # # Copyright (C) 2011 Alexander Shorin # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # from random import randint from phoxpy import exceptions from phoxpy.messages import PhoxRequest, PhoxResponse from phoxpy.messages import auth from phoxpy.server.main import ServerExtension, request_type __all__ = ['AuthExt'] class AuthExt(ServerExtension): def __init__(self, db): db.update({ 'licenses': set([]), 'users': {}, 'sessions': set([]) }) super(AuthExt, self).__init__(db) def get_session_id(self): return str(randint(10000, 50000)) def add_license(self, key): self.db['licenses'].add(key) def add_user(self, login, password): self.db['users'][login] = password @request_type(auth.AuthRequest) def handle_login(self, request): if request.client_id not in self.db['licenses']: raise exceptions.LicenseNotFound(request.client_id) if request.instance_count is None: raise exceptions.LisBaseException(654) if request.login not in self.db['users']: raise exceptions.UnknownUser() if self.db['users'][request.login] != request.password: raise exceptions.AuthentificationError() sessionid = self.get_session_id() self.db['sessions'].add(sessionid) return auth.AuthResponse( sessionid=sessionid, buildnumber=self.build_number, version=self.server_version ) @request_type(PhoxRequest) def handle_logout(self, request): if request.sessionid not in self.db['sessions']: raise exceptions.UnknownSession() self.db['sessions'].remove(request.sessionid) return PhoxResponse( buildnumber=request.buildnumber, sessionid=request.sessionid, version=request.version )
bsd-3-clause
Python
e2cbc0a3acf793ca8c45eb17cb0071a254a7e2b7
Update parse_indepexpends.py
SpencerNorris/SuperPACs,SpencerNorris/SuperPACs,SpencerNorris/SuperPACs,SpencerNorris/SuperPACs
server/src/datasource/parse_indepexpends.py
server/src/datasource/parse_indepexpends.py
from datasource import fec
from datasource import propublica
import os

FEC_APIKEY = os.getenv('FEC_API_KEY', '')
ProPublica_APIKEY = os.getenv('PP_API_KEY', '')

FecApiObj = fec.FECAPI(FEC_APIKEY)
committees = FecApiObj.get_committees()

PPCampFinObj = propublica.CampaignFinanceAPI(ProPublica_APIKEY)
PPCongressApi = propublica.CongressAPI(ProPublica_APIKEY)

# Build an index of current House and Senate member names
legislator_index = list()
legislators = PPCongressApi.list_members('house')["results"][0]["members"]
for legislator in legislators:
    name = str(legislator['first_name']) + " " + str(legislator['last_name'])
    legislator_index.append(name)

legislators = PPCongressApi.list_members('senate')["results"][0]["members"]
for legislator in legislators:
    name = str(legislator['first_name']) + " " + str(legislator['last_name'])
    legislator_index.append(name)

for committee in committees:
    if 2016 in committee['cycles']:
        indepExpend = PPCampFinObj.get_indep_expends(str(committee['committee_id']))
        for expend in indepExpend["results"]:
            if expend['candidate_name'] in legislator_index:
                # expend holds a particular expenditure for this legislator
                pass
from datasource import fec from datasource import propublica import os FEC_APIKEY = os.getenv('FEC_API_KEY', '') ProPublica_APIKEY = os.getenv('PP_API_KEY', '') FecApiObj = fec.FECAPI(FEC_APIKEY) committees = FecApiObj.get_committees() PPCampFinObj = propublica.CampaignFinanceAPI(ProPublica_APIKEY) datafile = open("IndepExpends.json", 'w') for committee in committees: if(2016 in committee['cycles']): indepExpend = PPCampFinObj.get_indep_expends(str(committee['committee_id'])) datafile.write(str(indepExpend)) datafile.close()
apache-2.0
Python
e751329b8aacdf51b70537be47172386deaded63
Fix alembic env
rootio/rootio_web,rootio/rootio_web,rootio/rootio_web,rootio/rootio_web
alembic/env.py
alembic/env.py
from __future__ import with_statement from alembic import context from sqlalchemy import engine_from_config, pool from logging.config import fileConfig # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata import os,sys parentdir = os.path.abspath(os.path.join('.', '.')) sys.path.insert(0,parentdir) from rootio.app import create_app from rootio.extensions import db app = create_app() config.set_main_option("sqlalchemy.url", app.config["SQLALCHEMY_DATABASE_URI"]) target_metadata = db.Model.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure(url=url) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, compare_type=True ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online()
from __future__ import with_statement from alembic import context from sqlalchemy import engine_from_config, pool from logging.config import fileConfig # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata import os,sys parentdir = os.path.abspath(os.path.join('.', '.')) sys.path.insert(0,parentdir) from rootio import create_app from rootio.extensions import db app = create_app() config.set_main_option("sqlalchemy.url", app.config["SQLALCHEMY_DATABASE_URI"]) target_metadata = db.Model.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure(url=url) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, compare_type=True ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online()
agpl-3.0
Python
b76e91c4517e52528f8543fce276ff4b5af9a4f6
fix temp file creation to something more multiplatform friendly
pablodav/burp_server_reports,pablodav/burp_server_reports
burp_reports/lib/files.py
burp_reports/lib/files.py
import tempfile
import os


def temp_file(file='temporal'):
    """
    return: str with tempfilename
    """
    # Append the current login name to the end of the filename
    file += '_{}'.format(os.getlogin())
    # Reuse the platform-aware temp directory from the standard library
    cache_path = os.path.join(tempfile.gettempdir(), file)
    return cache_path
import tempfile import os def temp_file(file='temporal'): """ return: str with tempfilename """ # Append uid to end of filename file += '_{}'.format(os.getuid()) # Simplified and reutilized core funtionally from python cache_path = os.path.join(tempfile.gettempdir(), file) return cache_path
mit
Python
632b86289ef643381c954adeca1f58c78e2aa8d5
Add documentation for plugins
danielmorosan/Cactus,ibarria0/Cactus,fjxhkj/Cactus,gone/Cactus,chaudum/Cactus,dreadatour/Cactus,Knownly/Cactus,ibarria0/Cactus,danielmorosan/Cactus,Bluetide/Cactus,dreadatour/Cactus,juvham/Cactus,dreadatour/Cactus,ibarria0/Cactus,PegasusWang/Cactus,koobs/Cactus,koenbok/Cactus,koobs/Cactus,Bluetide/Cactus,eudicots/Cactus,page-io/Cactus,andyzsf/Cactus-,gone/Cactus,fjxhkj/Cactus,gone/Cactus,Knownly/Cactus,juvham/Cactus,fjxhkj/Cactus,danielmorosan/Cactus,juvham/Cactus,koenbok/Cactus,chaudum/Cactus,andyzsf/Cactus-,PegasusWang/Cactus,eudicots/Cactus,koenbok/Cactus,eudicots/Cactus,chaudum/Cactus,Knownly/Cactus,PegasusWang/Cactus,koobs/Cactus,Bluetide/Cactus,page-io/Cactus,page-io/Cactus,andyzsf/Cactus-
cactus/plugin/defaults.py
cactus/plugin/defaults.py
#coding:utf-8 # Define no-op plugin methods def preBuildPage(page, context, data): """ Called prior to building a page. :param page: The page about to be built :param context: The context for this page (you can modify this, but you must return it) :param data: The raw body for this page (you can modify this). :returns: Modified (or not) context and data. """ return context, data def postBuildPage(page): """ Called after building a page. :param page: The page that was just built. :returns: None """ pass def preBuildStatic(static): """ Called before building (copying to the build folder) a static file. :param static: The static file about to be built. :returns: None """ pass def postBuildStatic(static): """ Called after building (copying to the build folder) a static file. :param static: The static file that was just built. :returns: None """ pass def preBuild(site): """ Called prior to building the site, after loading configuration, plugins and externals. :param site: The site about to be built. :returns: None """ pass def postBuild(site): """ Called after building the site. :param site: The site that was just built. :returns: None """ pass def preDeploy(site): """ Called prior to deploying the site (built files) :param site: The site about to be deployed. :returns: None """ pass def postDeploy(site): """ Called after deploying the site (built files) :param site: The site that was just built. :returns: None """ pass def preDeployFile(file): """ Called prior to deploying a single built file :param file: The file about to be deployed. :returns: None """ pass ORDER = -1 DEFAULTS = [ 'preBuildPage', 'postBuildPage', 'preBuildStatic', 'postBuildStatic', 'preBuild', 'postBuild', 'preDeploy', 'postDeploy', 'preDeployFile', ]
#coding:utf-8 # Define no-op plugin methods def preBuildPage(page, context, data): return context, data def postBuildPage(page): pass def preBuildStatic(static): pass def postBuildStatic(static): pass def preBuild(site): pass def postBuild(site): pass def preDeploy(site): pass def postDeploy(site): pass def preDeployFile(file): pass ORDER = -1 DEFAULTS = [ 'preBuildPage', 'postBuildPage', 'preBuildStatic', 'postBuildStatic', 'preBuild', 'postBuild', 'preDeploy', 'postDeploy', 'preDeployFile', ]
bsd-3-clause
Python
04fd80cda56a911289bca20c7ee1bd70ac263bd4
set readonly from true to false because the cursor is hidded if readonly is true.
ya790206/call_seq
call_seq/TextEdit/rich.py
call_seq/TextEdit/rich.py
from PySide import QtCore import pyqode.python # public API from pyqode.python.bootstrapper import Bootstrapper from pyqode.python.modes import PyAutoCompleteMode from pyqode.python.modes import CalltipsMode from pyqode.python.modes import CommentsMode from pyqode.python.modes import PyCodeCompletionMode, JediCompletionProvider from pyqode.python.modes import PEP8CheckerMode from pyqode.python.modes import PyAutoIndentMode from pyqode.python.modes import PyFlakesCheckerMode from pyqode.python.modes import PyHighlighterMode from pyqode.python.modes import PyIndenterMode from pyqode.python.modes import DEFAULT_DARK_STYLES from pyqode.python.modes import DEFAULT_LIGHT_STYLES from pyqode.python.modes import GoToAssignmentsMode from pyqode.python.modes import DocumentAnalyserMode from pyqode.python.panels import PreLoadPanel from pyqode.python.panels import SymbolBrowserPanel from pyqode.core.modes import CaretLineHighlighterMode from pyqode.python.panels import QuickDocPanel class RichTextEdit(pyqode.core.QCodeEdit): def __init__(self): super(RichTextEdit, self).__init__() self.setLineWrapMode(self.NoWrap) self.installPanel(pyqode.core.LineNumberPanel(), pyqode.core.PanelPosition.LEFT) self.installMode(pyqode.core.ZoomMode()) #self.installMode(pyqode.core.FileWatcherMode()) self.installMode(pyqode.core.SymbolMatcherMode()) self.installMode(pyqode.core.WordClickMode()) self.installMode(PyHighlighterMode(self.document())) self.installMode(PyAutoIndentMode()) self.installMode(PyFlakesCheckerMode()) self.installMode(PEP8CheckerMode()) self.installMode(CalltipsMode()) self.installMode(PyIndenterMode()) self.installMode(GoToAssignmentsMode()) self.installPanel(QuickDocPanel(), pyqode.core.PanelPosition.BOTTOM) self.installMode(CommentsMode()) self.installMode(CaretLineHighlighterMode()) self.setReadOnly(False)
from PySide import QtCore import pyqode.python # public API from pyqode.python.bootstrapper import Bootstrapper from pyqode.python.modes import PyAutoCompleteMode from pyqode.python.modes import CalltipsMode from pyqode.python.modes import CommentsMode from pyqode.python.modes import PyCodeCompletionMode, JediCompletionProvider from pyqode.python.modes import PEP8CheckerMode from pyqode.python.modes import PyAutoIndentMode from pyqode.python.modes import PyFlakesCheckerMode from pyqode.python.modes import PyHighlighterMode from pyqode.python.modes import PyIndenterMode from pyqode.python.modes import DEFAULT_DARK_STYLES from pyqode.python.modes import DEFAULT_LIGHT_STYLES from pyqode.python.modes import GoToAssignmentsMode from pyqode.python.modes import DocumentAnalyserMode from pyqode.python.panels import PreLoadPanel from pyqode.python.panels import SymbolBrowserPanel from pyqode.core.modes import CaretLineHighlighterMode from pyqode.python.panels import QuickDocPanel class RichTextEdit(pyqode.core.QCodeEdit): def __init__(self): super(RichTextEdit, self).__init__() self.setLineWrapMode(self.NoWrap) self.installPanel(pyqode.core.LineNumberPanel(), pyqode.core.PanelPosition.LEFT) self.installMode(pyqode.core.ZoomMode()) #self.installMode(pyqode.core.FileWatcherMode()) self.installMode(pyqode.core.SymbolMatcherMode()) self.installMode(pyqode.core.WordClickMode()) self.installMode(PyHighlighterMode(self.document())) self.installMode(PyAutoIndentMode()) self.installMode(PyFlakesCheckerMode()) self.installMode(PEP8CheckerMode()) self.installMode(CalltipsMode()) self.installMode(PyIndenterMode()) self.installMode(GoToAssignmentsMode()) self.installPanel(QuickDocPanel(), pyqode.core.PanelPosition.BOTTOM) self.installMode(CommentsMode()) self.installMode(CaretLineHighlighterMode()) self.setReadOnly(True)
apache-2.0
Python
29aed8ce12734ac0489a8b4e4aa9b48ff4a320a7
fix fail
shish/firehose
client/cli.py
client/cli.py
#!/usr/bin/env python import base64 import sys import logging import firehose.common as common class CLI(common.FirehoseClient): def __select(self, chums, prompt): print prompt for n, chum in enumerate(chums): print "%02d> %s (%s)" % (n, chum.name, chum.keyid) inp = raw_input("Enter ID number> ") return chums[int(inp)] def main(self, args=sys.argv): common.FirehoseClient.__init__(self) self.load_config() try: my_self = self.__select(self.get_identities(), "Select an identity to send as:") my_chum = self.__select(self.get_chums(), "Select somebody to send to:") self.set_identity(my_self) self.start_recv_thread() while True: data = raw_input("Send to %s> " % my_chum.name) cmd, _, args = data.partition(" ") if cmd == "/me": data = "ACT " + args elif cmd == "/ping": data = "PING 0" else: data = "MSG " + data my_chum.send(data) except (EOFError, KeyboardInterrupt): pass def on_msg(self, chum, target, message): print "%s: %s" % (chum.name, message) def on_act(self, chum, target, message): print "* %s %s" % (chum.name, message) if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG, format="%(asctime)19.19s %(levelname)4.4s %(name)s: %(message)s") module_log = logging.getLogger("firehose") module_log.setLevel(logging.DEBUG) module_log = logging.getLogger("gnupg") module_log.setLevel(logging.INFO) sys.exit(CLI().main(sys.argv))
#!/usr/bin/env python import base64 import sys import logging import firehose.common as common class CLI(common.FirehoseClient): def __select(self, chums, prompt): print prompt for n, chum in enumerate(chums): print "%02d> %s (%s)" % (n, chum.name, chum.keyid) inp = raw_input("Enter ID number> ") return chums[int(inp)] def main(self, args=sys.argv): common.FirehoseClient.__init__(self) self.load_config() try: my_self = self.__select(self.get_identities(), "Select an identity to send as:") my_chum = self.__select(self.get_chums(), "Select somebody to send to:") self.set_identity(my_self) self.start_recv_thread() while True: data = raw_input("Send to %s> " % chum.name) cmd, _, args = data.partition(" ") if cmd == "/me": data = "ACT " + args elif cmd == "/ping": data = "PING 0" else: data = "MSG " + data my_chum.send(data) except (EOFError, KeyboardInterrupt): pass def on_msg(self, chum, target, message): print "%s: %s" % (chum.name, message) def on_act(self, chum, target, message): print "* %s %s" % (chum.name, message) if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG, format="%(asctime)19.19s %(levelname)4.4s %(name)s: %(message)s") module_log = logging.getLogger("firehose") module_log.setLevel(logging.DEBUG) module_log = logging.getLogger("gnupg") module_log.setLevel(logging.INFO) sys.exit(CLI().main(sys.argv))
mit
Python
3157bbd5cca51ea2ac0c086a9337296c6652fafc
fix url order
sunlightlabs/django-citizendialer3000
citizendialer3000/urls.py
citizendialer3000/urls.py
from django.conf.urls.defaults import * urlpatterns = patterns('citizendialer3000.views', url(r'^$', 'callcampaign_list', name='call_list'), url(r'^(?P<slug>[\w\-]+)/$', 'callcampaign_detail', name='call_campaign'), url(r'^(?P<slug>[\w\-]+)/thankyou/$', 'complete', name='call_complete'), url(r'^(?P<slug>[\w\-]+)/results/$', 'results', name='results'), url(r'^(?P<slug>[\w\-]+)/results/calls.csv$', 'results_calls', name='results_calls'), url(r'^(?P<slug>[\w\-]+)/results/summary.csv$', 'results_summary', name='results_summary'), url(r'^(?P<slug>[\w\-]+)/(?P<bioguide_id>\w+)/$', 'contact_detail', name='call_contact'), )
from django.conf.urls.defaults import * urlpatterns = patterns('citizendialer3000.views', url(r'^$', 'callcampaign_list', name='call_list'), url(r'^(?P<slug>[\w\-]+)/$', 'callcampaign_detail', name='call_campaign'), url(r'^(?P<slug>[\w\-]+)/(?P<bioguide_id>\w+)/$', 'contact_detail', name='call_contact'), url(r'^(?P<slug>[\w\-]+)/thankyou/$', 'complete', name='call_complete'), url(r'^(?P<slug>[\w\-]+)/results/$', 'results', name='results'), url(r'^(?P<slug>[\w\-]+)/results/calls.csv$', 'results_calls', name='results_calls'), url(r'^(?P<slug>[\w\-]+)/results/summary.csv$', 'results_summary', name='results_summary'), )
bsd-3-clause
Python
302934bfd8b30ee1b33cdfb60ca36021df153746
improve cleanup process of test by removing the downloaded file
oyamad/QuantEcon.py,oyamad/QuantEcon.py,QuantEcon/QuantEcon.py,QuantEcon/QuantEcon.py
quantecon/util/tests/test_notebooks.py
quantecon/util/tests/test_notebooks.py
""" Tests for Notebook Utilities Functions --------- fetch_nb_dependencies """ from quantecon.util import fetch_nb_dependencies import unittest import os FILES = ['README.md'] REPO = "https://github.com/QuantEcon/QuantEcon.py" RAW = "raw" BRANCH = "master" class TestNotebookUtils(unittest.TestCase): def test_fetch_nb_dependencies(self): """ Run First and Test Download """ status = fetch_nb_dependencies(files=FILES, repo=REPO, raw=RAW, branch=BRANCH) self.assertFalse(False in status) def test_fetch_nb_dependencies_overwrite(self): """ Run Second and Ensure file is skipped by checking a False is found in status """ status = fetch_nb_dependencies(files=FILES, repo=REPO, raw=RAW, branch=BRANCH) #First will succeed status = fetch_nb_dependencies(files=FILES, repo=REPO, raw=RAW, branch=BRANCH) #Second should skip self.assertTrue(False in status) def tearDown(self): os.remove("README.md")
""" Tests for Notebook Utilities Functions --------- fetch_nb_dependencies """ from quantecon.util import fetch_nb_dependencies import unittest FILES = ['README.md'] REPO = "https://github.com/QuantEcon/QuantEcon.py" RAW = "raw" BRANCH = "master" class TestNotebookUtils(unittest.TestCase): def test_fetch_nb_dependencies(self): """ Run First and Test Download """ status = fetch_nb_dependencies(files=FILES, repo=REPO, raw=RAW, branch=BRANCH) self.assertFalse(False in status) def test_fetch_nb_dependencies_overwrite(self): """ Run Second and Ensure file is skipped by checking a False is found in status """ status = fetch_nb_dependencies(files=FILES, repo=REPO, raw=RAW, branch=BRANCH) self.assertTrue(False in status)
bsd-3-clause
Python
02b7d5416ad55b78e256e58ed6a282681d1df48d
Add required get_model for Haystack 2.0
michaelmcandrew/readthedocs.org,Carreau/readthedocs.org,jerel/readthedocs.org,techtonik/readthedocs.org,wijerasa/readthedocs.org,sunnyzwh/readthedocs.org,rtfd/readthedocs.org,asampat3090/readthedocs.org,Carreau/readthedocs.org,nikolas/readthedocs.org,ojii/readthedocs.org,sunnyzwh/readthedocs.org,safwanrahman/readthedocs.org,Tazer/readthedocs.org,clarkperkins/readthedocs.org,cgourlay/readthedocs.org,kenshinthebattosai/readthedocs.org,kenwang76/readthedocs.org,tddv/readthedocs.org,sunnyzwh/readthedocs.org,VishvajitP/readthedocs.org,mrshoki/readthedocs.org,mhils/readthedocs.org,d0ugal/readthedocs.org,tddv/readthedocs.org,ojii/readthedocs.org,attakei/readthedocs-oauth,gjtorikian/readthedocs.org,clarkperkins/readthedocs.org,hach-que/readthedocs.org,sils1297/readthedocs.org,sunnyzwh/readthedocs.org,istresearch/readthedocs.org,atsuyim/readthedocs.org,CedarLogic/readthedocs.org,mrshoki/readthedocs.org,nikolas/readthedocs.org,kenshinthebattosai/readthedocs.org,hach-que/readthedocs.org,michaelmcandrew/readthedocs.org,davidfischer/readthedocs.org,hach-que/readthedocs.org,davidfischer/readthedocs.org,VishvajitP/readthedocs.org,istresearch/readthedocs.org,kenwang76/readthedocs.org,kenshinthebattosai/readthedocs.org,asampat3090/readthedocs.org,stevepiercy/readthedocs.org,CedarLogic/readthedocs.org,royalwang/readthedocs.org,clarkperkins/readthedocs.org,ojii/readthedocs.org,sils1297/readthedocs.org,singingwolfboy/readthedocs.org,Tazer/readthedocs.org,raven47git/readthedocs.org,SteveViss/readthedocs.org,nyergler/pythonslides,soulshake/readthedocs.org,wijerasa/readthedocs.org,raven47git/readthedocs.org,johncosta/private-readthedocs.org,kenshinthebattosai/readthedocs.org,CedarLogic/readthedocs.org,royalwang/readthedocs.org,jerel/readthedocs.org,istresearch/readthedocs.org,safwanrahman/readthedocs.org,CedarLogic/readthedocs.org,espdev/readthedocs.org,sid-kap/readthedocs.org,titiushko/readthedocs.org,emawind84/readthedocs.org,agjohnson/readthedocs.org,atsuyim/readthedocs.org,royalwang/readthedocs.org,stevepiercy/readthedocs.org,asampat3090/readthedocs.org,sils1297/readthedocs.org,mrshoki/readthedocs.org,wijerasa/readthedocs.org,SteveViss/readthedocs.org,KamranMackey/readthedocs.org,takluyver/readthedocs.org,GovReady/readthedocs.org,techtonik/readthedocs.org,michaelmcandrew/readthedocs.org,KamranMackey/readthedocs.org,kenwang76/readthedocs.org,d0ugal/readthedocs.org,istresearch/readthedocs.org,cgourlay/readthedocs.org,nikolas/readthedocs.org,jerel/readthedocs.org,LukasBoersma/readthedocs.org,safwanrahman/readthedocs.org,fujita-shintaro/readthedocs.org,mrshoki/readthedocs.org,wanghaven/readthedocs.org,attakei/readthedocs-oauth,espdev/readthedocs.org,VishvajitP/readthedocs.org,VishvajitP/readthedocs.org,jerel/readthedocs.org,kdkeyser/readthedocs.org,takluyver/readthedocs.org,fujita-shintaro/readthedocs.org,emawind84/readthedocs.org,atsuyim/readthedocs.org,clarkperkins/readthedocs.org,sils1297/readthedocs.org,rtfd/readthedocs.org,johncosta/private-readthedocs.org,safwanrahman/readthedocs.org,takluyver/readthedocs.org,attakei/readthedocs-oauth,soulshake/readthedocs.org,titiushko/readthedocs.org,sid-kap/readthedocs.org,singingwolfboy/readthedocs.org,KamranMackey/readthedocs.org,techtonik/readthedocs.org,sid-kap/readthedocs.org,soulshake/readthedocs.org,mhils/readthedocs.org,espdev/readthedocs.org,titiushko/readthedocs.org,GovReady/readthedocs.org,tddv/readthedocs.org,cgourlay/readthedocs.org,davidfischer/readthedocs.org,GovReady/readthedocs.org,techtonik/readthedocs.org,agjohnson/readthedocs.org,Carreau/readthedocs.o
rg,stevepiercy/readthedocs.org,LukasBoersma/readthedocs.org,GovReady/readthedocs.org,gjtorikian/readthedocs.org,agjohnson/readthedocs.org,dirn/readthedocs.org,dirn/readthedocs.org,cgourlay/readthedocs.org,SteveViss/readthedocs.org,d0ugal/readthedocs.org,ojii/readthedocs.org,attakei/readthedocs-oauth,wanghaven/readthedocs.org,nyergler/pythonslides,davidfischer/readthedocs.org,johncosta/private-readthedocs.org,atsuyim/readthedocs.org,dirn/readthedocs.org,singingwolfboy/readthedocs.org,singingwolfboy/readthedocs.org,sid-kap/readthedocs.org,raven47git/readthedocs.org,LukasBoersma/readthedocs.org,KamranMackey/readthedocs.org,nikolas/readthedocs.org,mhils/readthedocs.org,kdkeyser/readthedocs.org,espdev/readthedocs.org,fujita-shintaro/readthedocs.org,d0ugal/readthedocs.org,espdev/readthedocs.org,pombredanne/readthedocs.org,emawind84/readthedocs.org,laplaceliu/readthedocs.org,nyergler/pythonslides,nyergler/pythonslides,dirn/readthedocs.org,takluyver/readthedocs.org,fujita-shintaro/readthedocs.org,gjtorikian/readthedocs.org,pombredanne/readthedocs.org,wijerasa/readthedocs.org,laplaceliu/readthedocs.org,rtfd/readthedocs.org,kdkeyser/readthedocs.org,wanghaven/readthedocs.org,Carreau/readthedocs.org,mhils/readthedocs.org,raven47git/readthedocs.org,michaelmcandrew/readthedocs.org,gjtorikian/readthedocs.org,stevepiercy/readthedocs.org,rtfd/readthedocs.org,pombredanne/readthedocs.org,Tazer/readthedocs.org,agjohnson/readthedocs.org,emawind84/readthedocs.org,SteveViss/readthedocs.org,royalwang/readthedocs.org,titiushko/readthedocs.org,hach-que/readthedocs.org,wanghaven/readthedocs.org,soulshake/readthedocs.org,laplaceliu/readthedocs.org,laplaceliu/readthedocs.org,asampat3090/readthedocs.org,kenwang76/readthedocs.org,Tazer/readthedocs.org,LukasBoersma/readthedocs.org,kdkeyser/readthedocs.org
readthedocs/projects/search_indexes.py
readthedocs/projects/search_indexes.py
# -*- coding: utf-8-*- import codecs import os from django.utils.html import strip_tags #from haystack import site from haystack import indexes from haystack.fields import CharField #from celery_haystack.indexes import SearchIndex from projects.models import File, ImportedFile, Project import logging log = logging.getLogger(__name__) class ProjectIndex(indexes.SearchIndex, indexes.Indexable): text = CharField(document=True, use_template=True) author = CharField() title = CharField(model_attr='name') description = CharField(model_attr='description') repo_type = CharField(model_attr='repo_type') def prepare_author(self, obj): return obj.users.all()[0] def get_model(self): return Project class FileIndex(indexes.SearchIndex, indexes.Indexable): text = CharField(document=True, use_template=True) author = CharField() project = CharField(model_attr='project__name', faceted=True) title = CharField(model_attr='heading') def prepare_author(self, obj): return obj.project.users.all()[0] def get_model(self): return File #Should prob make a common subclass for this and FileIndex class ImportedFileIndex(indexes.SearchIndex, indexes.Indexable): text = CharField(document=True) author = CharField() project = CharField(model_attr='project__name', faceted=True) title = CharField(model_attr='name') def prepare_author(self, obj): return obj.project.users.all()[0] def prepare_text(self, obj): """ Prepare the text of the html file. This only works on machines that have the html files for the projects checked out. """ #Import this here to hopefully fix tests for now. from pyquery import PyQuery full_path = obj.project.rtd_build_path() file_path = os.path.join(full_path, obj.path.lstrip('/')) try: with codecs.open(file_path, encoding='utf-8', mode='r') as f: content = f.read() except IOError as e: log.info('Unable to index file: %s, error :%s' % (file_path, e)) return log.debug('Indexing %s' % obj.slug) try: to_index = strip_tags(PyQuery(content)("div.document").html()).replace(u'¶', '') except ValueError: #Pyquery returns ValueError if div.document doesn't exist. return return to_index def get_model(self): return ImportedFile
# -*- coding: utf-8-*- import codecs import os from django.utils.html import strip_tags #from haystack import site from haystack import indexes from haystack.fields import CharField #from celery_haystack.indexes import SearchIndex from projects.models import File, ImportedFile, Project import logging log = logging.getLogger(__name__) class ProjectIndex(indexes.SearchIndex, indexes.Indexable): text = CharField(document=True, use_template=True) author = CharField() title = CharField(model_attr='name') description = CharField(model_attr='description') repo_type = CharField(model_attr='repo_type') def prepare_author(self, obj): return obj.users.all()[0] class FileIndex(indexes.SearchIndex, indexes.Indexable): text = CharField(document=True, use_template=True) author = CharField() project = CharField(model_attr='project__name', faceted=True) title = CharField(model_attr='heading') def prepare_author(self, obj): return obj.project.users.all()[0] #Should prob make a common subclass for this and FileIndex class ImportedFileIndex(indexes.SearchIndex, indexes.Indexable): text = CharField(document=True) author = CharField() project = CharField(model_attr='project__name', faceted=True) title = CharField(model_attr='name') def prepare_author(self, obj): return obj.project.users.all()[0] def prepare_text(self, obj): """ Prepare the text of the html file. This only works on machines that have the html files for the projects checked out. """ #Import this here to hopefully fix tests for now. from pyquery import PyQuery full_path = obj.project.rtd_build_path() file_path = os.path.join(full_path, obj.path.lstrip('/')) try: with codecs.open(file_path, encoding='utf-8', mode='r') as f: content = f.read() except IOError as e: log.info('Unable to index file: %s, error :%s' % (file_path, e)) return log.debug('Indexing %s' % obj.slug) try: to_index = strip_tags(PyQuery(content)("div.document").html()).replace(u'¶', '') except ValueError: #Pyquery returns ValueError if div.document doesn't exist. return return to_index
mit
Python
96877f2cb706a465c5e7fb4d316dbd82ff2cb432
add comment
numerodix/purelyjs,numerodix/purelyjs
purelyjs/interpreter.py
purelyjs/interpreter.py
from .io import invoke class Interpreter(object): known_engines = ['js', 'rhino'] def __init__(self, exes=None): engines = exes if exes else self.known_engines self.exe = self.detect(engines) if not self.exe: raise ValueError("No js engine could be found, tried: %s" % ', '.join(engines)) def detect(self, engines): found = None for engine in engines: # NOTE: Very platform specific success, stdout, stderr = invoke(['which', engine]) if success: found = stdout break return found def run_module(self, filepath): success, stdout, stderr = invoke([self.exe, filepath]) return success, stderr
from .io import invoke class Interpreter(object): known_engines = ['js', 'rhino'] def __init__(self, exes=None): engines = exes if exes else self.known_engines self.exe = self.detect(engines) if not self.exe: raise ValueError("No js engine could be found, tried: %s" % ', '.join(engines)) def detect(self, engines): found = None for engine in engines: success, stdout, stderr = invoke(['which', engine]) if success: found = stdout break return found def run_module(self, filepath): success, stdout, stderr = invoke([self.exe, filepath]) return success, stderr
mit
Python
5aa90e98abcfafa9036f8cc19cd49b33aa638181
update dev version after 0.26.0 tag [skip ci]
desihub/desispec,desihub/desispec
py/desispec/_version.py
py/desispec/_version.py
__version__ = '0.26.0.dev3104'
__version__ = '0.26.0'
bsd-3-clause
Python
b59b0e12a0f5fc83d69d9eaa1f7652e8e1b4ac81
Improve tuple and list converters
dean0x7d/pybinding,MAndelkovic/pybinding,MAndelkovic/pybinding,dean0x7d/pybinding,dean0x7d/pybinding,MAndelkovic/pybinding
pybinding/utils/misc.py
pybinding/utils/misc.py
from functools import wraps import numpy as np def to_tuple(o): try: return tuple(o) except TypeError: return (o,) if o is not None else () def to_list(o): try: return list(o) except TypeError: return [o] if o is not None else [] def with_defaults(options: dict, defaults_dict: dict=None, **defaults_kwargs): """Return a dict where missing keys are filled in by defaults >>> options = dict(hello=0) >>> with_defaults(options, hello=4, world=5) == dict(hello=0, world=5) True >>> defaults = dict(hello=4, world=5) >>> with_defaults(options, defaults) == dict(hello=0, world=5) True >>> with_defaults(options, defaults, world=7, yes=3) == dict(hello=0, world=5, yes=3) True """ options = options if options else {} if defaults_dict: options = dict(defaults_dict, **options) return dict(defaults_kwargs, **options) def x_pi(value): """Return str of value in 'multiples of pi' latex representation >>> x_pi(6.28) == r"$2\pi$" True >>> x_pi(3) == r"$0.95\pi$" True >>> x_pi(-np.pi) == r"$-\pi$" True >>> x_pi(0) == "0" True """ n = value / np.pi if np.isclose(n, 0): return "0" elif np.isclose(abs(n), 1): return r"$\pi$" if n > 0 else r"$-\pi$" else: return r"${:.2g}\pi$".format(n) def decorator_decorator(decorator_wrapper): """A decorator decorator which allows it to be used with or without arguments Parameters ---------- decorator_wrapper : Callable[[Any], Callable] Examples -------- >>> @decorator_decorator ... def decorator_wrapper(optional="default"): ... def actual_decorator(func): ... return lambda x: func(x, optional) ... return actual_decorator >>> @decorator_wrapper("hello") ... def foo(x, y): ... print(x, y) >>> foo(1) 1 hello >>> @decorator_wrapper ... def bar(x, y): ... print(x, y) >>> bar(2) 2 default """ @wraps(decorator_wrapper) def new_wrapper(*args, **kwargs): if len(args) == 1 and not kwargs and (isinstance(args[0], type) or callable(args[0])): return decorator_wrapper()(args[0]) else: return lambda cls_or_func: decorator_wrapper(*args, **kwargs)(cls_or_func) return new_wrapper
from functools import wraps import numpy as np def to_tuple(o): if isinstance(o, (tuple, list)): return tuple(o) else: return o, def with_defaults(options: dict, defaults_dict: dict=None, **defaults_kwargs): """Return a dict where missing keys are filled in by defaults >>> options = dict(hello=0) >>> with_defaults(options, hello=4, world=5) == dict(hello=0, world=5) True >>> defaults = dict(hello=4, world=5) >>> with_defaults(options, defaults) == dict(hello=0, world=5) True >>> with_defaults(options, defaults, world=7, yes=3) == dict(hello=0, world=5, yes=3) True """ options = options if options else {} if defaults_dict: options = dict(defaults_dict, **options) return dict(defaults_kwargs, **options) def x_pi(value): """Return str of value in 'multiples of pi' latex representation >>> x_pi(6.28) == r"$2\pi$" True >>> x_pi(3) == r"$0.95\pi$" True >>> x_pi(-np.pi) == r"$-\pi$" True >>> x_pi(0) == "0" True """ n = value / np.pi if np.isclose(n, 0): return "0" elif np.isclose(abs(n), 1): return r"$\pi$" if n > 0 else r"$-\pi$" else: return r"${:.2g}\pi$".format(n) def decorator_decorator(decorator_wrapper): """A decorator decorator which allows it to be used with or without arguments Parameters ---------- decorator_wrapper : Callable[[Any], Callable] Examples -------- >>> @decorator_decorator ... def decorator_wrapper(optional="default"): ... def actual_decorator(func): ... return lambda x: func(x, optional) ... return actual_decorator >>> @decorator_wrapper("hello") ... def foo(x, y): ... print(x, y) >>> foo(1) 1 hello >>> @decorator_wrapper ... def bar(x, y): ... print(x, y) >>> bar(2) 2 default """ @wraps(decorator_wrapper) def new_wrapper(*args, **kwargs): if len(args) == 1 and not kwargs and (isinstance(args[0], type) or callable(args[0])): return decorator_wrapper()(args[0]) else: return lambda cls_or_func: decorator_wrapper(*args, **kwargs)(cls_or_func) return new_wrapper
bsd-2-clause
Python
ee5a85df1d2db8babd8d6df6a188137051c3a48e
Change the improvement policies due to reorganizing reggie.
mwhoffman/pybo
pybo/policies/simple.py
pybo/policies/simple.py
""" Acquisition functions based on the probability or expected value of improvement. """ from __future__ import division from __future__ import absolute_import from __future__ import print_function import numpy as np __all__ = ['EI', 'PI', 'UCB', 'Thompson'] def EI(model, _, xi=0.0): """ Expected improvement policy with an exploration parameter of `xi`. """ target = model.predict(model.data[0])[0].max() + xi def index(X, grad=False): """EI policy instance.""" return model.get_improvement(X, target, grad) return index def PI(model, _, xi=0.05): """ Probability of improvement policy with an exploration parameter of `xi`. """ target = model.predict(model.data[0])[0].max() + xi def index(X, grad=False): """PI policy instance.""" return model.get_tail(X, target, grad) return index def Thompson(model, _, n=100, rng=None): """ Thompson sampling policy. """ return model.sample_f(n, rng).get def UCB(model, _, delta=0.1, xi=0.2): """ The (GP)UCB acquisition function where `delta` is the probability that the upper bound holds and `xi` is a multiplicative modification of the exploration factor. """ d = model.ndata a = xi * 2 * np.log(np.pi**2 / 3 / delta) b = xi * (4 + d) def index(X, grad=False): """UCB policy instance.""" posterior = model.predict(X, grad=grad) mu, s2 = posterior[:2] beta = a + b * np.log(model.ndata + 1) if grad: dmu, ds2 = posterior[2:] return (mu + np.sqrt(beta * s2), dmu + 0.5 * np.sqrt(beta / s2[:, None]) * ds2) else: return mu + np.sqrt(beta * s2) return index
""" Acquisition functions based on the probability or expected value of improvement. """ from __future__ import division from __future__ import absolute_import from __future__ import print_function import numpy as np __all__ = ['EI', 'PI', 'UCB', 'Thompson'] def EI(model, _, xi=0.0): """ Expected improvement policy with an exploration parameter of `xi`. """ X = model.data[0] x = X[model.predict(X)[0].argmax()] def index(X, grad=False): """EI policy instance.""" return model.get_improvement(X, x, xi, grad) return index def PI(model, _, xi=0.05): """ Probability of improvement policy with an exploration parameter of `xi`. """ X = model.data[0] x = X[model.predict(X)[0].argmax()] def index(X, grad=False): """PI policy instance.""" return model.get_improvement(X, x, xi, grad, pi=True) return index def Thompson(model, _, n=100, rng=None): """ Thompson sampling policy. """ return model.sample_f(n, rng).get def UCB(model, _, delta=0.1, xi=0.2): """ The (GP)UCB acquisition function where `delta` is the probability that the upper bound holds and `xi` is a multiplicative modification of the exploration factor. """ d = model.ndata a = xi * 2 * np.log(np.pi**2 / 3 / delta) b = xi * (4 + d) def index(X, grad=False): """UCB policy instance.""" posterior = model.predict(X, grad=grad) mu, s2 = posterior[:2] beta = a + b * np.log(model.ndata + 1) if grad: dmu, ds2 = posterior[2:] return (mu + np.sqrt(beta * s2), dmu + 0.5 * np.sqrt(beta / s2[:, None]) * ds2) else: return mu + np.sqrt(beta * s2) return index
bsd-2-clause
Python
1a8d7797e691bd5959fc8f7cdc0371e39208aee7
Update version #
obsidianforensics/hindsight,obsidianforensics/hindsight
pyhindsight/__init__.py
pyhindsight/__init__.py
__author__ = "Ryan Benson" __version__ = "2.1.0" __email__ = "[email protected]"
__author__ = "Ryan Benson" __version__ = "2.0.5" __email__ = "[email protected]"
apache-2.0
Python
c8b89d104d5676c006b39825fcdb4a1e80f6515a
Update wingding
henry232323/Pesterchum-Discord,henry232323/Pesterchum-Discord
pyquirks/quirk_funcs.py
pyquirks/quirk_funcs.py
from random import sample

_wdalpha = {
    "!": 9999, "\"": 9986, "#": 9985, "$": 128083, "%": 128365, "&": 128366,
    "'": 128367, "(": 9742, ")": 9990, "*": 128386, "+": 128387, ",": 128234,
    "-": 128235, ".": 128236, "/": 128237, "0": 128193, "1": 128194, "2": 128196,
    "3": 128463, "4": 128464, "5": 128452, "6": 8987, "7": 128430, "8": 128432,
    "9": 128434, ":": 128435, ";": 128436, "<": 128427, "=": 128428, ">": 9991,
    "?": 9997, "@": 128398, "A": 9996, "B": 128076, "C": 128077, "D": 128078,
    "E": 9756, "F": 9758, "G": 9757, "H": 9759, "I": 9995, "J": 9786,
    "K": 128528, "L": 9785, "M": 128163, "N": 9760, "O": 9872, "P": 127985,
    "Q": 9992, "R": 9788, "S": 128167, "T": 10052, "U": 128326, "V": 10014,
    "W": 128328, "X": 10016, "Y": 10017, "Z": 9770, "[": 9775, "\\": 2384,
    "]": 9784, "^": 9800, "_": 9801, "`": 9802, "{": 10048, "|": 10047,
    "}": 10077, "~": 10078, "a": 9803, "b": 9804, "c": 9805, "d": 9806,
    "e": 9807, "f": 9808, "g": 9809, "h": 9810, "i": 9811, "k": 38,
    "l": 9679, "m": 10061, "n": 9632, "o": 9633, "q": 10065, "r": 10066,
    "s": 11047, "t": 10731, "u": 9670, "v": 10070, "w": 11045, "x": 8999,
    "y": 9043, "z": 8984, "p": 128912, "j": 128624
}


def lower(str):
    return str.lower()


def reverse(str):
    # Reverse the string with a slice
    return str[::-1]


def upper(str):
    return str.upper()


def scramble(text):
    # Shuffle the characters of the text into a random order
    return "".join(sample(text, len(text)))


def capitalize(str):
    return str.capitalize()


def wingding(str):
    # Map each character to its Wingdings codepoint, leaving unmapped characters as-is
    f = ""
    for char in str:
        try:
            char = chr(_wdalpha[char])
        except KeyError:
            pass
        f += char
    return f
from random import sample _wdalpha = { "!": 9999, "\"": 9986, "#": 9985, "$": 128083, "%": 128365, "&": 128366, "'": 128367, "(": 9742, ")": 9990, "*": 128386, "+": 128387, ",": 128234, "-": 128235, ".": 128236, "/": 128237, "0": 128193, "1": 128194, "2": 128196, "3": 128463, "4": 128464, "5": 128452, "6": 8987, "7": 128430, "8": 128432, "9": 128434, ":": 128435, ";": 128436, "<": 128427, "=": 128428, ">": 9991, "?": 9997, "@": 128398, "A": 9996, "B": 128076, "C": 128077, "D": 128078, "E": 9756, "F": 9758, "G": 9757, "H": 9759, "I": 9995, "J": 9786, "K": 128528, "L": 9785, "M": 128163, "N": 9760, "O": 9872, "P": 127985, "Q": 9992, "R": 9788, "S": 128167, "T": 10052, "U": 128326, "V": 10014, "W": 128328, "X": 10016, "Y": 10017, "Z": 9770, "[": 9775, "\\": 2384, "]": 9784, "^": 9800, "_": 9801, "`": 9802, "{": 10048, "|": 10047, "}": 10077, "~": 10078, "a": 9803, "b": 9804, "c": 9805, "d": 9806, "e": 9807, "f": 9808, "g": 9809, "h": 9810, "i": 9811, "k": 38, "l": 9679, "m": 10061, "n": 9632, "o": 9633, "q": 10065, "r": 10066, "s": 11047, "t": 10731, "u": 9670, "v": 10070, "w": 11045, "x": 8999, "y": 9043, "z": 8984 } def lower(str): return str.lower() def reverse(str): return reverse(str) def upper(str): return str.upper() def scramble(text): return "".join(sample(text, len(text))) def capitalize(str): return str.capitalize() def wingding(str): f = "" for char in str: try: char = chr(_wdalpha[char]) except KeyError: pass f += char return f
mit
Python
ea6f60838ae309e5fb0662b2416d3c4450be7823
correct straight function
fluz/udacity
design_of_computer_programs_cs212/lesson01/poker_game.py
design_of_computer_programs_cs212/lesson01/poker_game.py
def poker(hands):
    """Return the best hand: poker([hand,...]) => hand"""
    return max(hands, key=hand_rank)


def hand_rank(hand):
    """define a rank for a specific hand"""
    ranks = card_ranks(hand)
    if straight(ranks) and flush(hand):  # straight flush
        return (8, max(ranks))
    elif kind(4, ranks):  # 4 of a kind
        return (7, kind(4, ranks), kind(1, ranks))
    elif kind(3, ranks) and kind(2, ranks):  # full house
        return (6, kind(3, ranks), kind(2, ranks))
    elif flush(hand):  # flush
        return (5, ranks)
    elif straight(ranks):  # straight
        return (4, max(ranks))
    elif kind(3, ranks):  # 3 of a kind
        return (3, kind(3, ranks), ranks)
    elif two_pair(ranks):  # 2 pair
        return (2, two_pair(ranks), ranks)
    elif kind(2, ranks):  # kind
        return (1, kind(2, ranks), ranks)
    else:  # high card
        return (0, ranks)


def card_ranks(cards):
    """ Return a list of the ranks, sorted with higher first"""
    ranks = ["--23456789TJQKA".index(r) for r, s in cards]
    ranks.sort(reverse=True)
    return [5, 4, 3, 2, 1] if ranks == [14, 5, 4, 3, 2] else ranks


def straight(ranks):
    """ Return True if the ordered ranks form a 5 card straight"""
    return (max(ranks) - min(ranks) == 4) and len(set(ranks)) == 5


def flush(hand):
    """ Return True if all cards have the same suit"""
    suits = [s for r, s in hand]
    return len(set(suits)) == 1


def kind(n, ranks):
    """ Return the first rank that this hand has exactly n and return None otherwise"""
    for r in ranks:
        if ranks.count(r) == n:
            return r
    return None


def two_pair(ranks):
    """If there are two pair, return the two ranks as a tuple: (highest, lowest);
    otherwise return None."""
    pair_highest = kind(2, ranks)
    pair_lowest = kind(2, list(reversed(ranks)))
    if pair_highest and pair_highest != pair_lowest:
        return (pair_highest, pair_lowest)
    return None
def poker(hands): """Return the best hand: poker([hand,...]) => hand""" return max(hands, key=hand_rank) def hand_rank(hand): """define a rank for a specific hand""" ranks = card_ranks(hand) if straight(ranks) and flush(hand): # straight flush return (8, max(ranks)) elif kind(4, ranks): # 4 of a kind return (7, kind(4, ranks), kind(1, ranks)) elif kind(3, ranks) and kind(2, ranks): # full house return (6, kind(3, ranks), kind(2, ranks)) elif flush(hand): # flush return (5, ranks) elif straight(ranks): # straight return (4, max(ranks)) elif kind(3, ranks): # 3 of a kind return (3, kind(3, ranks), ranks) elif two_pair(ranks): # 2 pair return (2, two_pair(ranks), ranks) elif kind(2, ranks): # kind return (1, kind(2, ranks), ranks) else: # high card return (0, ranks) def card_ranks(cards): """ Return a list of the ranks, sorted with higher first""" ranks = ["--23456789TJQKA".index(r) for r, s in cards] ranks.sort(reverse=True) return ranks def straight(ranks): """ Return True if the ordered ranks from a 5 card straight""" return (max(ranks) - min(ranks) == 4) and len(set(ranks)) == 5 def flush(hand): """ Return True if all cards have the same suit""" suits = [s for r, s in hand] return len(set(suits)) == 1 def kind(n, ranks): """ Return the first rank that this hand has exactly n and return None otherwise""" for r in ranks: if ranks.count(r) == n: return r return None def two_pair(ranks): """If there are two pair, return the two ranks as a tuple: (highest, lowest); otherwise return None.""" pair_highest = kind(2, ranks) pair_lowest = kind(2, list(reversed(ranks))) if pair_highest and pair_highest != pair_lowest: return (pair_highest, pair_lowest) return None
mit
Python
56c3c373563a38991da72bc235d4e3e40e711968
Use extra space.
fossilet/leetcode,deput/leetcode,fossilet/leetcode,deput/leetcode
remove_duplicates_from_sorted_array.py
remove_duplicates_from_sorted_array.py
#! /usr/bin/env python3 """ http://oj.leetcode.com/problems/remove-duplicates-from-sorted-array/ Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length. Do not allocate extra space for another array, you must do this in place with constant memory. For example, Given input array A = [1,1,2], Your function should return length = 2, and A is now [1,2]. Since Apr-22-2014 18:16 """ class Solution: # @param a list of integers # @return an integer def removeDuplicates(self, a): global A l = len(a) if l in (0, 1): return l else: i = 0 B = [] while i < l: if i == l - 1: B.append(a[i]) break if a[i] == a[i + 1]: i += 1 else: B.append(a[i]) i += 1 A = list(B) return len(A) if __name__ == '__main__': s = Solution() A = [] assert s.removeDuplicates(A) == 0 assert A == [] A = [1] assert s.removeDuplicates(A) == 1 assert A == [1] A = [1, 1, 2] assert s.removeDuplicates(A) == 2 assert A == [1, 2] A = [1, 1, 2, 3, 4, 4, 5, 5] assert s.removeDuplicates(A) == 5 assert A == [1, 2, 3, 4, 5] A = [1, 2, 3, 4, 5] assert s.removeDuplicates(A) == 5 assert A == [1, 2, 3, 4, 5]
#! /usr/bin/env python3
"""
http://oj.leetcode.com/problems/remove-duplicates-from-sorted-array/

Given a sorted array, remove the duplicates in place such that each element
appear only once and return the new length.

Do not allocate extra space for another array, you must do this in place with
constant memory.

For example, Given input array A = [1,1,2],

Your function should return length = 2, and A is now [1,2].

Since Apr-22-2014 18:16
"""


class Solution:
    # @param a list of integers
    # @return an integer
    def removeDuplicates(self, A):
        L = len(A)
        if L in (0, 1):
            return L
        else:
            i = 0
            while i <= L - 2:
                if A[i] == A[i + 1]:
                    A.remove(A[i])
                    L = len(A)
                i += 1
            return len(A)


if __name__ == '__main__':
    s = Solution()
    A = []
    assert s.removeDuplicates(A) == 0
    assert A == []
    A = [1]
    assert s.removeDuplicates(A) == 1
    assert A == [1]
    A = [1, 1, 2]
    assert s.removeDuplicates(A) == 2
    assert A == [1, 2]
    A = [1, 1, 2, 3, 4, 4, 5, 5]
    assert s.removeDuplicates(A) == 5
    assert A == [1, 2, 3, 4, 5]
    A = [1, 2, 3, 4, 5]
    assert s.removeDuplicates(A) == 5
    assert A == [1, 2, 3, 4, 5]
mit
Python
c1cbdf20e6c109ff1586f663cab7e24f1716af08
Make remove-if-exists function public
ascoderu/opwen-cloudserver,ascoderu/opwen-cloudserver
opwen_email_server/utils/temporary.py
opwen_email_server/utils/temporary.py
from contextlib import contextmanager
from contextlib import suppress
from os import close
from os import remove
from tempfile import mkstemp
from typing import Generator


def create_tempfilename() -> str:
    file_descriptor, filename = mkstemp()
    close(file_descriptor)
    return filename


@contextmanager
def removing(path: str) -> Generator[str, None, None]:
    try:
        yield path
    finally:
        remove_if_exists(path)


def remove_if_exists(path: str):
    with suppress(FileNotFoundError):
        remove(path)
from contextlib import contextmanager
from contextlib import suppress
from os import close
from os import remove
from tempfile import mkstemp
from typing import Generator


def create_tempfilename() -> str:
    file_descriptor, filename = mkstemp()
    close(file_descriptor)
    return filename


@contextmanager
def removing(path: str) -> Generator[str, None, None]:
    try:
        yield path
    finally:
        _remove_if_exists(path)


def _remove_if_exists(path: str):
    with suppress(FileNotFoundError):
        remove(path)
apache-2.0
Python
94763abaf573bfd25cad06da0cffc6b94a7dedc8
Fix a flaw in the old implementation of checking whether the state has changed.
boronine/discipline,boronine/discipline
pervert/management/commands/pervert_migrate.py
pervert/management/commands/pervert_migrate.py
import json
from django.core.management.base import BaseCommand, CommandError
from pervert.models import AbstractPervert, SchemaState, PervertError


class Command(BaseCommand):

    help = "Registers new schema for Pervert-controlled models"

    def handle(self, *args, **options):
        states = []
        print "Reading the schema of Pervert-controlled models..."
        state_text = ""
        for cl in AbstractPervert.__subclasses__():
            state = {
                "app_label": cl._meta.app_label,
                "model": cl._meta.object_name,
                "fields": [],
                "fks": []
            }
            state_text += "%s.models.%s\n" % (state["app_label"], state["model"],)
            for field in cl._meta.fields:
                state_text += " * %s\n" % field.name
                if field.name == "uid":
                    continue
                if field.__class__.__name__ == "ForeignKey":
                    state["fks"].append(field.name)
                else:
                    state["fields"].append(field.name)
            # Sort to make sure there is a unique json representation of each state
            states.append(state)

        # If the json is identical to the last saved state
        if SchemaState.objects.count() and \
           json.loads(SchemaState.objects.order_by("-when")[0].state) == states:
            print "The state hasn't changed, nothing to do."
        else:
            # Save new state
            ss = SchemaState(state = json.dumps(states))
            ss.save()
            print state_text + "SchemaState saved on %s" % ss.when
import json
from django.core.management.base import BaseCommand, CommandError
from pervert.models import AbstractPervert, SchemaState, PervertError


class Command(BaseCommand):

    help = "Registers new schema for Pervert-controlled models"

    def handle(self, *args, **options):
        states = []
        print "Reading the schema of Pervert-controlled models..."
        state_text = ""
        for cl in AbstractPervert.__subclasses__():
            state = {
                "app_label": cl._meta.app_label,
                "model": cl._meta.object_name,
                "fields": [],
                "fks": []
            }
            state_text += "%s.models.%s\n" % (state["app_label"], state["model"],)
            for field in cl._meta.fields:
                state_text += " * %s\n" % field.name
                if field.name == "uid":
                    continue
                if field.__class__.__name__ == "ForeignKey":
                    state["fks"].append(field.name)
                else:
                    state["fields"].append(field.name)
            # Sort to make sure there is a unique json representation of each state
            state["fields"].sort()
            state["fks"].sort()
            states.append(state)

        jsonstate = json.dumps(states)

        # If the json is identical to the last saved state
        if SchemaState.objects.count() and \
           SchemaState.objects.order_by("-when")[0].state == jsonstate:
            print "The state hasn't changed, nothing to do."
        else:
            # Save new state
            ss = SchemaState(state = json.dumps(states))
            ss.save()
            print state_text + "SchemaState saved on %s" % ss.when
mit
Python
24c83211588ac71492640ce43e3a893e05466a54
Change old membership migration to null
pbanaszkiewicz/amy,pbanaszkiewicz/amy,pbanaszkiewicz/amy
amy/workshops/migrations/0065_multiple_memberships.py
amy/workshops/migrations/0065_multiple_memberships.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('workshops', '0064_membership'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='host',
            name='membership',
        ),
        migrations.AddField(
            model_name='membership',
            name='host',
            # the default value of 1 here doesn't break anything, because
            # migrations 0064-0065 should be applied together
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
                                    default=1, to='workshops.Host', null=True),
            preserve_default=False,
        ),
    ]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('workshops', '0064_membership'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='host',
            name='membership',
        ),
        migrations.AddField(
            model_name='membership',
            name='host',
            # the default value of 1 here doesn't break anything, because
            # migrations 0064-0065 should be applied together
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
                                    default=1, to='workshops.Host'),
            preserve_default=False,
        ),
    ]
mit
Python
9bc8b9967631064821112b5c7ff3b65fb0b176f6
Fix wrong column name in db migration script of ryu plugin
klmitch/neutron,leeseulstack/openstack,sasukeh/neutron,asgard-lab/neutron,oeeagle/quantum,asgard-lab/neutron,apporc/neutron,mahak/neutron,dhanunjaya/neutron,silenci/neutron,takeshineshiro/neutron,NeCTAR-RC/neutron,miyakz1192/neutron,klmitch/neutron,jumpojoy/neutron,wenhuizhang/neutron,igor-toga/local-snat,leeseuljeong/leeseulstack_neutron,netscaler/neutron,CiscoSystems/vespa,shahbazn/neutron,kaiweifan/neutron,dims/neutron,ntt-sic/neutron,Juniper/contrail-dev-neutron,wenhuizhang/neutron,mattt416/neutron,glove747/liberty-neutron,skyddv/neutron,sebrandon1/neutron,CiscoSystems/vespa,projectcalico/calico-neutron,gkotton/neutron,virtualopensystems/neutron,beagles/neutron_hacking,sasukeh/neutron,pnavarro/neutron,rdo-management/neutron,redhat-openstack/neutron,adelina-t/neutron,yamahata/neutron,openstack/neutron,jerryz1982/neutron,mandeepdhami/neutron,oeeagle/quantum,cernops/neutron,zhhf/charging,kaiweifan/neutron,infobloxopen/neutron,neoareslinux/neutron,bgxavier/neutron,mahak/neutron,Juniper/neutron,watonyweng/neutron,Juniper/contrail-dev-neutron,blueboxgroup/neutron,projectcalico/calico-neutron,bigswitch/neutron,vijayendrabvs/ssl-neutron,beagles/neutron_hacking,Comcast/neutron,mmnelemane/neutron,zhhf/charging,kaiweifan/neutron,wolverineav/neutron,sajuptpm/neutron-ipam,chitr/neutron,rickerc/neutron_audit,sajuptpm/neutron-ipam,virtualopensystems/neutron,vivekanand1101/neutron,paninetworks/neutron,SmartInfrastructures/neutron,noironetworks/neutron,CiscoSystems/neutron,SamYaple/neutron,yuewko/neutron,mandeepdhami/neutron,mahak/neutron,MaximNevrov/neutron,antonioUnina/neutron,jumpojoy/neutron,Metaswitch/calico-neutron,CiscoSystems/vespa,cloudbase/neutron-virtualbox,infobloxopen/neutron,watonyweng/neutron,suneeth51/neutron,vveerava/Openstack,aristanetworks/neutron,CiscoSystems/neutron,apporc/neutron,rickerc/neutron_audit,takeshineshiro/neutron,virtualopensystems/neutron,leeseuljeong/leeseulstack_neutron,JioCloud/neutron,yamahata/neutron,Stavitsky/neutron,rdo-management/neutron,gopal1cloud/neutron,gkotton/neutron,Comcast/neutron,vivekanand1101/neutron,yanheven/neutron,shahbazn/neutron,alexandrucoman/vbox-neutron-agent,rickerc/neutron_audit,vveerava/Openstack,cisco-openstack/neutron,adelina-t/neutron,magic0704/neutron,mmnelemane/neutron,gkotton/neutron,jerryz1982/neutron,swdream/neutron,vbannai/neutron,blueboxgroup/neutron,cloudbase/neutron,netscaler/neutron,sajuptpm/neutron-ipam,huntxu/neutron,paninetworks/neutron,bigswitch/neutron,CiscoSystems/neutron,yuewko/neutron,silenci/neutron,jacknjzhou/neutron,chitr/neutron,citrix-openstack-build/neutron,citrix-openstack-build/neutron,vijayendrabvs/hap,bgxavier/neutron,Stavitsky/neutron,dims/neutron,SmartInfrastructures/neutron,yamahata/neutron,wolverineav/neutron,ntt-sic/neutron,MaximNevrov/neutron,dhanunjaya/neutron,zhhf/charging,vijayendrabvs/hap,neoareslinux/neutron,vijayendrabvs/ssl-neutron,huntxu/neutron,waltBB/neutron_read,miyakz1192/neutron,javaos74/neutron,eonpatapon/neutron,vbannai/neutron,cloudbase/neutron-virtualbox,leeseulstack/openstack,SamYaple/neutron,waltBB/neutron_read,aristanetworks/neutron,yamahata/tacker,vijayendrabvs/ssl-neutron,vbannai/neutron,Metaswitch/calico-neutron,noironetworks/neutron,javaos74/neutron,openstack/neutron,swdream/neutron,yanheven/neutron,suneeth51/neutron,beagles/neutron_hacking,redhat-openstack/neutron,NeCTAR-RC/neutron,leeseuljeong/leeseulstack_neutron,ntt-sic/neutron,alexandrucoman/vbox-neutron-agent,netscaler/neutron,igor-toga/local-snat,cisco-openstack/neutron,openstack/neutron,mattt416/neutron,cernops/neutron,Jio
Cloud/neutron,Juniper/contrail-dev-neutron,jacknjzhou/neutron,pnavarro/neutron,Juniper/neutron,eayunstack/neutron,barnsnake351/neutron,barnsnake351/neutron,skyddv/neutron,magic0704/neutron,cloudbase/neutron,eonpatapon/neutron,Juniper/neutron,JianyuWang/neutron,sebrandon1/neutron,vveerava/Openstack,leeseulstack/openstack,blueboxgroup/neutron,JianyuWang/neutron,Comcast/neutron,eayunstack/neutron,glove747/liberty-neutron,antonioUnina/neutron,yamahata/tacker,citrix-openstack-build/neutron,yamahata/tacker,vijayendrabvs/hap,gopal1cloud/neutron
neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py
neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

"""ryu

This retroactively provides migration support for
https://review.openstack.org/#/c/11204/

Revision ID: 5a875d0e5c
Revises: 2c4af419145b
Create Date: 2012-12-18 12:32:04.482477

"""

# revision identifiers, used by Alembic.
revision = '5a875d0e5c'
down_revision = '2c4af419145b'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugin=None, options=None):
    if not migration.should_run(active_plugin, migration_for_plugins):
        return

    op.create_table(
        'tunnelkeys',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('tunnel_key', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('tunnel_key')
    )
    op.create_table(
        'tunnelkeylasts',
        sa.Column('last_key', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.PrimaryKeyConstraint('last_key')
    )


def downgrade(active_plugin=None, options=None):
    if not migration.should_run(active_plugin, migration_for_plugins):
        return

    op.drop_table('tunnelkeylasts')
    op.drop_table('tunnelkeys')
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

"""ryu

This retroactively provides migration support for
https://review.openstack.org/#/c/11204/

Revision ID: 5a875d0e5c
Revises: 2c4af419145b
Create Date: 2012-12-18 12:32:04.482477

"""

# revision identifiers, used by Alembic.
revision = '5a875d0e5c'
down_revision = '2c4af419145b'

# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
    'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugin=None, options=None):
    if not migration.should_run(active_plugin, migration_for_plugins):
        return

    op.create_table(
        'tunnelkeys',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('last_key', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('last_key')
    )
    op.create_table(
        'tunnelkeylasts',
        sa.Column('last_key', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.PrimaryKeyConstraint('last_key')
    )


def downgrade(active_plugin=None, options=None):
    if not migration.should_run(active_plugin, migration_for_plugins):
        return

    op.drop_table('tunnelkeylasts')
    op.drop_table('tunnelkeys')
apache-2.0
Python
06f7f0b5d45a4349ee688aaac86b57c74ad0f76c
FIX geocoder model
eicher31/compassion-switzerland,eicher31/compassion-switzerland,ecino/compassion-switzerland,CompassionCH/compassion-switzerland,CompassionCH/compassion-switzerland,ecino/compassion-switzerland,eicher31/compassion-switzerland,ecino/compassion-switzerland,CompassionCH/compassion-switzerland
partner_compassion/models/base_geocoder.py
partner_compassion/models/base_geocoder.py
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2019 Compassion CH (http://www.compassion.ch)
#    Releasing children from poverty in Jesus' name
#    @author: Emanuel Cino <[email protected]>
#
#    The licence is in the file __manifest__.py
#
##############################################################################
import logging

from odoo import models

_logger = logging.getLogger(__name__)


class Geocoder(models.AbstractModel):
    _inherit = 'base.geocoder'

    def _raise_internet_access_error(self, error):
        # Don't raise error
        _logger.error(
            "Cannot contact geolocation servers. Please make sure that your "
            "Internet connection is up and running (%s).", error)
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2019 Compassion CH (http://www.compassion.ch)
#    Releasing children from poverty in Jesus' name
#    @author: Emanuel Cino <[email protected]>
#
#    The licence is in the file __manifest__.py
#
##############################################################################
import logging

from odoo import models

_logger = logging.getLogger(__name__)


class Geocoder(models.Model):
    _inherit = 'base.geocoder'

    def _raise_internet_access_error(self, error):
        # Don't raise error
        _logger.error(
            "Cannot contact geolocation servers. Please make sure that your "
            "Internet connection is up and running (%s).", error)
agpl-3.0
Python
397f33adb5cafaeda3de624dc9dd1bb24d0b65e5
remove dup line
christabor/MoAL,christabor/MoAL,christabor/MoAL,christabor/MoAL,christabor/MoAL
MOAL/maths/applied/optimization/strength_reduction.py
MOAL/maths/applied/optimization/strength_reduction.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = """Chris Tabor ([email protected])"""

if __name__ == '__main__':
    from os import getcwd
    from os import sys
    sys.path.append(getcwd())

from MOAL.helpers.display import Section
from MOAL.helpers.trials import test_speed

DEBUG = True if __name__ == '__main__' else False

"""Strength reduction is an optimization technique that involves taking
a more 'advanced' math technique and breaking it down into simple 'dumb'
tasks that can be repeated.
For example, multiplication can be converted to lots of addition."""


@test_speed
def exp(x, power):
    # 4^3
    # ... 4 * 4 * 4
    res = x
    for num in [x] * power:
        res = num * res
    return res


@test_speed
def strengthreduced_exp(x, power):
    # Replaces an exponential operation with a multiplication + addition
    # 4^3 = 64
    # ... 4 * 4 * 4 = 64
    # ... 2 + 2 + 2 + 2 + 2 ... (32 times) = 64
    res = x
    for num in [x] * power:
        res = strengthreduced_mult(num, res)
    return res


@test_speed
def mult(x, y):
    return x * y


@test_speed
def strengthreduced_mult(x, y):
    # 2 * 4 = 8
    # ... 2 + 2 + 2 + 2 = 8
    res = 0
    for f in xrange(y):
        res += x
    return res


if DEBUG:
    with Section('Optimization - strength reduction'):
        # This is slower since the native multiplication is much much faster
        # than looping, but it demonstrates the idea.
        max = 200
        f, g = exp(4, 2), strengthreduced_exp(4, 2)
        assert f == g
        print(f, g)
        f2, g2 = mult(2, 4), strengthreduced_mult(2, 4)
        assert f2 == g2
        print(f2, g2)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = """Chris Tabor ([email protected])"""

__author__ = """Chris Tabor ([email protected])"""

if __name__ == '__main__':
    from os import getcwd
    from os import sys
    sys.path.append(getcwd())

from MOAL.helpers.display import Section
from MOAL.helpers.trials import test_speed

DEBUG = True if __name__ == '__main__' else False

"""Strength reduction is an optimization technique that involves taking
a more 'advanced' math technique and breaking it down into simple 'dumb'
tasks that can be repeated.
For example, multiplication can be converted to lots of addition."""


@test_speed
def exp(x, power):
    # 4^3
    # ... 4 * 4 * 4
    res = x
    for num in [x] * power:
        res = num * res
    return res


@test_speed
def strengthreduced_exp(x, power):
    # Replaces an exponential operation with a multiplication + addition
    # 4^3 = 64
    # ... 4 * 4 * 4 = 64
    # ... 2 + 2 + 2 + 2 + 2 ... (32 times) = 64
    res = x
    for num in [x] * power:
        res = strengthreduced_mult(num, res)
    return res


@test_speed
def mult(x, y):
    return x * y


@test_speed
def strengthreduced_mult(x, y):
    # 2 * 4 = 8
    # ... 2 + 2 + 2 + 2 = 8
    res = 0
    for f in xrange(y):
        res += x
    return res


if DEBUG:
    with Section('Optimization - strength reduction'):
        # This is slower since the native multiplication is much much faster
        # than looping, but it demonstrates the idea.
        max = 200
        f, g = exp(4, 2), strengthreduced_exp(4, 2)
        assert f == g
        print(f, g)
        f2, g2 = mult(2, 4), strengthreduced_mult(2, 4)
        assert f2 == g2
        print(f2, g2)
apache-2.0
Python
3f2f069e1c22ee88afb67ef68164046222a009e3
Create a error class for the API client
att-comdev/drydock,att-comdev/drydock
drydock_provisioner/error.py
drydock_provisioner/error.py
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json


class DesignError(Exception):
    pass


class StateError(Exception):
    pass


class OrchestratorError(Exception):
    pass


class TransientOrchestratorError(OrchestratorError):
    pass


class PersistentOrchestratorError(OrchestratorError):
    pass


class DriverError(Exception):
    pass


class TransientDriverError(DriverError):
    pass


class PersistentDriverError(DriverError):
    pass


class ApiError(Exception):
    def __init__(self, msg, code=500):
        super().__init__(msg)
        self.message = msg
        self.status_code = code

    def to_json(self):
        err_dict = {'error': msg, 'type': self.__class__.__name__}
        return json.dumps(err_dict)


class InvalidFormat(ApiError):
    def __init__(self, msg, code=400):
        super(InvalidFormat, self).__init__(msg, code=code)


class ClientError(Exception):
    def __init__(self, msg, code=500):
        super().__init__(msg)
        self.message = msg
        self.status_code = code
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json


class DesignError(Exception):
    pass


class StateError(Exception):
    pass


class OrchestratorError(Exception):
    pass


class TransientOrchestratorError(OrchestratorError):
    pass


class PersistentOrchestratorError(OrchestratorError):
    pass


class DriverError(Exception):
    pass


class TransientDriverError(DriverError):
    pass


class PersistentDriverError(DriverError):
    pass


class ApiError(Exception):
    def __init__(self, msg, code=500):
        super().__init__(msg)
        self.message = msg
        self.status_code = code

    def to_json(self):
        err_dict = {'error': msg, 'type': self.__class__.__name__}}
        return json.dumps(err_dict)


class InvalidFormat(ApiError):
    def __init__(self, msg, code=400):
        super(InvalidFormat, self).__init__(msg, code=code)
apache-2.0
Python
46cd16ff56ff93b2ee8a38363b37c3287c9cb1cc
Update sal checkin module.
salopensource/sal-scripts,salopensource/sal-scripts,salopensource/sal-scripts
payload/usr/local/sal/checkin_modules/sal_checkin.py
payload/usr/local/sal/checkin_modules/sal_checkin.py
#!/usr/local/sal/Python.framework/Versions/3.8/bin/python3


import sal

__version__ = '1.1.0'


def main():
    _, _, bu_key = sal.get_server_prefs()
    sal_submission = {
        'extra_data': {
            'sal_version': sal.__version__,
            'key': bu_key,},
        'facts': {'checkin_module_version': __version__}}

    sal.set_checkin_results('Sal', sal_submission)


if __name__ == "__main__":
    main()
#!/usr/local/sal/Python.framework/Versions/3.8/bin/python3


import sys

import sal

__version__ = '1.0.0'


def main():
    _, _, bu_key = sal.get_server_prefs()
    sal_submission = {
        'extra_data': {
            'sal_version': sal.__version__,
            'key': bu_key,},
        'facts': {'checkin_module_version': __version__}}

    sal.set_checkin_results('Sal', sal_submission)


if __name__ == "__main__":
    main()
apache-2.0
Python
55132ff6740b3c70ddb75dcf7c3615aaea0680ac
Fix typo
zujko/manage-vm,zujko/manage-vm,zujko/manage-vm
main/models.py
main/models.py
from django.db import models
from django.contrib.auth.models import User


class VM(models.Model):
    user = models.ForeignKey(User, related_name='user', null=False)
    vmid = models.PositiveIntegerField()
    template = models.CharField(max_length=100)
    hostname = models.CharField(max_length=30)
    storage = models.CharField(max_length=50)
    memory = models.PositiveIntegerField()
    swap = models.PositiveIntegerField()
    cores = models.PositiveSmallIntegerField()
    disk = models.PositiveIntegerField()
    description = models.CharField(max_length=200)
    ip = models.CharField(max_length=15)

    def __unicode__(self):
        return u'%s' % self.hostname


class Limits(models.Model):
    memory = models.PositiveIntegerField()
    swap = models.PositiveIntegerField()
    cores = models.PositiveSmallIntegerField()
    disk = models.PositiveSmallIntegerField()
from django.db import models
from django.contrib.auth.models import User


class VM(models.Model):
    user = models.ForeignKey(User, related_name='user', null=False)
    vmid = models.PositiveIntegerField()
    template = models.CharField(max_length=100)
    hostname = models.CharField(max_length=30)
    storage = models.CharField(max_length=50)
    memory = models.PositiveIntegerField()
    swap = models.PositiveIntegerField()
    cores = models.PositiveSmallIntegerField()
    disk = models.PositiveIntegerField()
    description = models.CharField(max_length=200)
    ip = models.CharField(max_length=15)

    def __unicode__(self):
        return u'%s' % self.hostname


def Limits(models.Model):
    memory = models.PositiveIntegerField()
    swap = models.PositiveIntegerField()
    cores = models.PositiveSmallIntegerField()
    disk = models.PositiveSmallIntegerField()
mit
Python
b6f54a008cfe1c0a6db06d4f9c23d4699c2ab901
Update harmonizer.py
Phantasus/intelmq,aaronkaplan/intelmq-old,s4n7h0/intelmq,aaronkaplan/intelmq-old,aaronkaplan/intelmq-old
intelmq/bots/inputs/openbl/harmonizer.py
intelmq/bots/inputs/openbl/harmonizer.py
from intelmq.lib.bot import Bot, sys


class OpenBLHarmonizerBot(Bot):

    def process(self):
        event = self.receive_message()

        if event:
            event.add('feed', 'openbl')
            event.add('feed_url', 'http://www.openbl.org/lists/date_all.txt')
            ip_value = event.value('reported_ip')
            event.add('source_ip', ip_value)
            event.add('ip', ip_value)
            event.add('type', 'malware')  # ???
            self.send_message(event)
            self.acknowledge_message()


if __name__ == "__main__":
    bot = OpenBLHarmonizerBot(sys.argv[1])
    bot.start()
from intelmq.lib.bot import Bot, sys


class OpenBLHarmonizerBot(Bot):

    def process(self):
        event = self.receive_message()

        if event:
            event.add('feed', 'openbl')
            event.add('feed_url', 'http://www.openbl.org/lists/date_all.txt')
            ip_value = event.value('reported_ip')
            event.add('source_ip', ip_value)
            event.add('ip', ip_value)
            event.add('type', 'brute-force')  # ???
            self.send_message(event)
            self.acknowledge_message()


if __name__ == "__main__":
    bot = OpenBLHarmonizerBot(sys.argv[1])
    bot.start()
agpl-3.0
Python
93904a11a78d5c58d2baaaa71cb962195becae6e
Change test.
avanzosc/event-wip
event_track_info/tests/test_track_info.py
event_track_info/tests/test_track_info.py
# -*- coding: utf-8 -*-
# © 2016 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

from openerp.addons.sale_order_create_event.tests.\
    test_sale_order_create_event import TestSaleOrderCreateEvent


class TestTrackInfo(TestSaleOrderCreateEvent):

    def setUp(self):
        super(TestTrackInfo, self).setUp()
        self.event_model = self.env['event.event']
        self.url = 'www.example.com'
        self.planification = 'This is the planification'
        self.resolution = 'This is the resolution'
        self.html_info = 'This is the html_info'
        self.track_template = self.env['product.event.track.template'].create({
            'product_id': self.service_product.id,
            'sequence': 0,
            'name': 'Session 1',
            'planification': self.planification,
            'resolution': self.resolution,
            'html_info': self.html_info,
            'url': self.url,
        })

    def test_sale_order_confirm(self):
        self.sale_order2.action_button_confirm()
        cond = [('sale_order_line', '=', self.sale_order2.order_line[0].id)]
        event = self.event_model.search(cond, limit=1)
        self.sale_order2.order_line[0].event_id = event.id
        self.sale_order2.action_button_confirm()
        for track in self.sale_order2.mapped('order_line.event_id.track_ids'):
            if track.url:
                self.assertEquals(track.url, self.url)
            if track.planification:
                self.assertEquals(track.planification, self.planification)
            if track.resolution:
                self.assertEquals(track.resolution, self.resolution)
# -*- coding: utf-8 -*-
# © 2016 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

from openerp.addons.sale_order_create_event.tests.\
    test_sale_order_create_event_by_task import TestSaleOrderCreateEvent


class TestTrackInfo(TestSaleOrderCreateEvent):

    def setUp(self):
        super(TestTrackInfo, self).setUp()
        self.url = 'www.example.com'
        self.planification = 'This is the planification'
        self.resolution = 'This is the resolution'
        self.html_info = 'This is the html_info'
        self.track_template = self.env['product.event.track.template'].create({
            'product_id': self.service_product.id,
            'sequence': 0,
            'name': 'Session 1',
            'planification': self.planification,
            'resolution': self.resolution,
            'html_info': self.html_info,
            'url': self.url,
        })

    def test_sale_order_confirm(self):
        self.sale_order2.action_button_confirm()
        for track in self.sale_order2.mapped('order_line.event_id.track_ids'):
            self.assertEquals(track.url, self.url)
            self.assertEquals(track.planification, self.planification)
            self.assertEquals(track.resolution, self.resolution)
            self.assertEquals(track.html_info, self.html_info)
agpl-3.0
Python
d32f6dcfcc7bbf8f4d9a8d84673635b1345450f6
Simplify library includes
vigetlabs/dnsimple
dnsimple/__init__.py
dnsimple/__init__.py
from dnsimple.client import Client
mit
Python
9f790ebf51c7e05e09a39bd18f2597410ea0287d
bump version to 0.6.2
ivelum/djangoql,artinnok/djangoql,ivelum/djangoql,artinnok/djangoql,ivelum/djangoql,artinnok/djangoql
djangoql/__init__.py
djangoql/__init__.py
__version__ = '0.6.2'
__version__ = '0.6.1'
mit
Python
842e1bac8edaf6f28772067eaffd83351d28332a
add unicode
LeoHeo/fastube,LeoHeo/fastube,LeoHeo/fastube,LeoHeo/fastube
fastube/fastube/settings/partials/auth.py
fastube/fastube/settings/partials/auth.py
# -*- coding: utf-8 -*-
import os

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Auth Model
AUTH_USER_MODEL = "users.User"

# Login
LOGIN_URL = "/login/"

SIGNUP_SUCCESS_MESSAGE = "성공적으로 회원가입이 되었습니다."
LOGIN_SUCCESS_MESSAGE = "성공적으로 로그인이 되었습니다."
LOGOUT_SUCCESS_MESSAGE = "성공적으로 로그아웃이 되었습니다."

SOCIAL_AUTH_URL_NAMESPACE = 'social'

AUTHENTICATION_BACKENDS = (
    'social.backends.facebook.FacebookOAuth2',
    'social.backends.kakao.KakaoOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)

SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get("SOCIAL_AUTH_FACEBOOK_KEY")
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get("SOCIAL_AUTH_FACEBOOK_SECRET")

SOCIAL_AUTH_KAKAO_KEY = os.environ.get("SOCIAL_AUTH_KAKAO_KEY")
SOCIAL_AUTH_KAKAO_SECRET = os.environ.get("SOCIAL_AUTH_KAKAO_SECRET")

SOCIAL_AUTH_PIPELINE = (
    'social.pipeline.social_auth.social_details',
    'social.pipeline.social_auth.social_uid',
    'social.pipeline.social_auth.auth_allowed',
    'social.pipeline.social_auth.social_user',
    'social.pipeline.user.get_username',
    'social.pipeline.social_auth.associate_by_email',
    'social.pipeline.user.create_user',
    'social.pipeline.social_auth.associate_user',
    'social.pipeline.social_auth.load_extra_data',
    'social.pipeline.user.user_details'
)

SOCIAL_AUTH_LOGIN_REDIRECT_URL = "/"
#-*- coding: utf-8 -*-
import os

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Auth Model
AUTH_USER_MODEL = "users.User"

# Login
LOGIN_URL = "/login/"

SIGNUP_SUCCESS_MESSAGE = "성공적으로 회원가입이 되었습니다."
LOGIN_SUCCESS_MESSAGE = "성공적으로 로그인이 되었습니다."
LOGOUT_SUCCESS_MESSAGE = "성공적으로 로그아웃이 되었습니다."

SOCIAL_AUTH_URL_NAMESPACE = 'social'

AUTHENTICATION_BACKENDS = (
    'social.backends.facebook.FacebookOAuth2',
    'social.backends.kakao.KakaoOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)

SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get("SOCIAL_AUTH_FACEBOOK_KEY")
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get("SOCIAL_AUTH_FACEBOOK_SECRET")

SOCIAL_AUTH_KAKAO_KEY = os.environ.get("SOCIAL_AUTH_KAKAO_KEY")
SOCIAL_AUTH_KAKAO_SECRET = os.environ.get("SOCIAL_AUTH_KAKAO_SECRET")

SOCIAL_AUTH_PIPELINE = (
    'social.pipeline.social_auth.social_details',
    'social.pipeline.social_auth.social_uid',
    'social.pipeline.social_auth.auth_allowed',
    'social.pipeline.social_auth.social_user',
    'social.pipeline.user.get_username',
    'social.pipeline.social_auth.associate_by_email',
    'social.pipeline.user.create_user',
    'social.pipeline.social_auth.associate_user',
    'social.pipeline.social_auth.load_extra_data',
    'social.pipeline.user.user_details'
)

SOCIAL_AUTH_LOGIN_REDIRECT_URL = "/"
mit
Python
61731632b04ca1d9a719b6b4b62fa0a97926e3a9
clean up unused imports
mnubo/kubernetes-py,mnubo/kubernetes-py,sebastienc/kubernetes-py,froch/kubernetes-py,froch/kubernetes-py,sebastienc/kubernetes-py
kubernetes/K8sHorizontalPodAutoscaler.py
kubernetes/K8sHorizontalPodAutoscaler.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#

from kubernetes.K8sObject import K8sObject
from kubernetes.models.v1.HorizontalPodAutoscaler import HorizontalPodAutoscaler


class K8sHorizontalPodAutoscaler(K8sObject):

    def __init__(self, config=None, name=None):
        super(K8sHorizontalPodAutoscaler, self).__init__(
            config=config,
            obj_type='HorizontalPodAutoscaler',
            name=name
        )

    # ------------------------------------------------------------------------------------- override

    def create(self):
        super(K8sHorizontalPodAutoscaler, self).create()
        self.get()
        return self

    def update(self):
        super(K8sHorizontalPodAutoscaler, self).update()
        self.get()
        return self

    def list(self, pattern=None):
        ls = super(K8sHorizontalPodAutoscaler, self).list()
        hpas = list(map(lambda x: HorizontalPodAutoscaler(x), ls))
        if pattern is not None:
            hpas = list(filter(lambda x: pattern in x.name, hpas))
        k8s = []
        for x in hpas:
            z = K8sHorizontalPodAutoscaler(config=self.config, name=x.name)
            z.model = x
            k8s.append(z)
        return k8s

    # ------------------------------------------------------------------------------------- get

    def get(self):
        self.model = HorizontalPodAutoscaler(self.get_model())
        return self

    # ------------------------------------------------------------------------------------- cpu_percent

    @property
    def cpu_percent(self):
        return self.model.spec.cpu_utilization

    @cpu_percent.setter
    def cpu_percent(self, pct=None):
        self.model.spec.cpu_utilization = pct

    # ------------------------------------------------------------------------------------- min replicas

    @property
    def min_replicas(self):
        return self.model.spec.min_replicas

    @min_replicas.setter
    def min_replicas(self, min=None):
        self.model.spec.min_replicas = min

    # ------------------------------------------------------------------------------------- max replicas

    @property
    def max_replicas(self):
        return self.model.spec.max_replicas

    @max_replicas.setter
    def max_replicas(self, max=None):
        self.model.spec.max_replicas = max
#!/usr/bin/env python
# -*- coding: utf-8 -*-

#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#

from kubernetes.K8sObject import K8sObject
from kubernetes.K8sDeployment import K8sDeployment
from kubernetes.K8sReplicationController import K8sReplicationController
from kubernetes.K8sExceptions import NotFoundException
from kubernetes.models.v1.HorizontalPodAutoscaler import HorizontalPodAutoscaler

import subprocess


class K8sHorizontalPodAutoscaler(K8sObject):

    def __init__(self, config=None, name=None):
        super(K8sHorizontalPodAutoscaler, self).__init__(
            config=config,
            obj_type='HorizontalPodAutoscaler',
            name=name
        )

    # ------------------------------------------------------------------------------------- override

    def create(self):
        super(K8sHorizontalPodAutoscaler, self).create()
        self.get()
        return self

    def update(self):
        super(K8sHorizontalPodAutoscaler, self).update()
        self.get()
        return self

    def list(self, pattern=None):
        ls = super(K8sHorizontalPodAutoscaler, self).list()
        hpas = list(map(lambda x: HorizontalPodAutoscaler(x), ls))
        if pattern is not None:
            hpas = list(filter(lambda x: pattern in x.name, hpas))
        k8s = []
        for x in hpas:
            z = K8sHorizontalPodAutoscaler(config=self.config, name=x.name)
            z.model = x
            k8s.append(z)
        return k8s

    # ------------------------------------------------------------------------------------- get

    def get(self):
        self.model = HorizontalPodAutoscaler(self.get_model())
        return self

    # ------------------------------------------------------------------------------------- cpu_percent

    @property
    def cpu_percent(self):
        return self.model.spec.cpu_utilization

    @cpu_percent.setter
    def cpu_percent(self, pct=None):
        self.model.spec.cpu_utilization = pct

    # ------------------------------------------------------------------------------------- min replicas

    @property
    def min_replicas(self):
        return self.model.spec.min_replicas

    @min_replicas.setter
    def min_replicas(self, min=None):
        self.model.spec.min_replicas = min

    # ------------------------------------------------------------------------------------- max replicas

    @property
    def max_replicas(self):
        return self.model.spec.max_replicas

    @max_replicas.setter
    def max_replicas(self, max=None):
        self.model.spec.max_replicas = max
apache-2.0
Python
b6947fa1850c888cd5b3190b2abf315409f01cdc
Add an explicit logfile rollover at the beginning of each Tulsi bazel build.
pinterest/tulsi,bazelbuild/tulsi,pinterest/tulsi,bazelbuild/tulsi,bazelbuild/tulsi,pinterest/tulsi,bazelbuild/tulsi,pinterest/tulsi,bazelbuild/tulsi,pinterest/tulsi,bazelbuild/tulsi,pinterest/tulsi
src/TulsiGenerator/Scripts/tulsi_logging.py
src/TulsiGenerator/Scripts/tulsi_logging.py
# Copyright 2017 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Logging routines used by Tulsi scripts."""

import logging
import logging.handlers
import os


class Logger(object):
  """Tulsi specific logging."""

  def __init__(self):
    logging_dir = os.path.expanduser('~/Library/Application Support/Tulsi')
    if not os.path.exists(logging_dir):
      os.mkdir(logging_dir)

    logfile = os.path.join(logging_dir, 'build_log.txt')

    # Currently only creates a single logger called 'tulsi_logging'. If
    # additional loggers are needed, consider adding a name attribute to the
    # Logger.
    self._logger = logging.getLogger('tulsi_logging')
    self._logger.setLevel(logging.INFO)

    file_handler = logging.handlers.RotatingFileHandler(logfile,
                                                        backupCount=20)
    file_handler.setLevel(logging.INFO)
    # Create a new log file for each build.
    file_handler.doRollover()
    self._logger.addHandler(file_handler)

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    self._logger.addHandler(console)

  def log_action(self, action_name, action_id, seconds):
    del action_id  # Unused by this logger.
    # Log to file and print to stdout for display in the Xcode log.
    self._logger.info('<*> %s completed in %0.3f ms',
                      action_name, seconds * 1000)
# Copyright 2017 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Logging routines used by Tulsi scripts."""

import logging
import logging.handlers
import os


class Logger(object):
  """Tulsi specific logging."""

  def __init__(self):
    logging_dir = os.path.expanduser('~/Library/Application Support/Tulsi')
    if not os.path.exists(logging_dir):
      os.mkdir(logging_dir)

    logfile = os.path.join(logging_dir, 'build_log.txt')

    # Currently only creates a single logger called 'tulsi_logging'. If
    # additional loggers are needed, consider adding a name attribute to the
    # Logger.
    self._logger = logging.getLogger('tulsi_logging')
    self._logger.setLevel(logging.INFO)

    file_handler = logging.handlers.RotatingFileHandler(logfile,
                                                        backupCount=5)
    file_handler.setLevel(logging.INFO)
    self._logger.addHandler(file_handler)

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    self._logger.addHandler(console)

  def log_action(self, action_name, action_id, seconds):
    del action_id  # Unused by this logger.
    # Log to file and print to stdout for display in the Xcode log.
    self._logger.info('<*> %s completed in %0.3f ms',
                      action_name, seconds * 1000)
apache-2.0
Python
48c3a35deffaca384189c8342a65debf03036dff
Remove semicolons
tijme/angularjs-csti-scanner,tijme/angularjs-sandbox-escape-scanner
acstis/Logging.py
acstis/Logging.py
# -*- coding: utf-8 -*-

# MIT License
#
# Copyright (c) 2017 Tijme Gommers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from colorama import Fore, Back, Style

import datetime


class Logging:

    @staticmethod
    def info(message):
        print(Back.BLACK + str(datetime.datetime.now()) + ": " + message)

    @staticmethod
    def red(message):
        print(Fore.RED + Back.BLACK + str(datetime.datetime.now()) + ": " + message)

    @staticmethod
    def green(message):
        print(Fore.GREEN + Back.BLACK + str(datetime.datetime.now()) + ": " + message)

    @staticmethod
    def yellow(message):
        print(Fore.YELLOW + Back.BLACK + str(datetime.datetime.now()) + ": " + message)
# -*- coding: utf-8 -*-

# MIT License
#
# Copyright (c) 2017 Tijme Gommers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from colorama import Fore, Back, Style

import datetime


class Logging:

    @staticmethod
    def info(message):
        print(Back.BLACK + str(datetime.datetime.now()) + ": " + message);

    @staticmethod
    def red(message):
        print(Fore.RED + Back.BLACK + str(datetime.datetime.now()) + ": " + message);

    @staticmethod
    def green(message):
        print(Fore.GREEN + Back.BLACK + str(datetime.datetime.now()) + ": " + message);

    @staticmethod
    def yellow(message):
        print(Fore.YELLOW + Back.BLACK + str(datetime.datetime.now()) + ": " + message);
mit
Python
9365a3dce9cc1abe507c36d3dd1d79ca7fcab15c
add admin for Product
byteweaver/django-eca-catalogue
eca_catalogue/abstract_admin.py
eca_catalogue/abstract_admin.py
from django.contrib import admin

from treebeard.admin import TreeAdmin


class AbstractProductCategoryAdmin(admin.ModelAdmin):
    prepopulated_fields = {"slug": ("name",)}


class AbstractNestedProductCategoryAdmin(TreeAdmin):
    prepopulated_fields = {"slug": ("name",)}


class AbstractProductAdmin(admin.ModelAdmin):
    list_display = ['item_number', 'name',]
    prepopulated_fields = {"slug": ("name",)}
from django.contrib import admin

from treebeard.admin import TreeAdmin


class AbstractProductCategoryAdmin(admin.ModelAdmin):
    prepopulated_fields = {"slug": ("name",)}


class AbstractNestedProductCategoryAdmin(TreeAdmin):
    prepopulated_fields = {"slug": ("name",)}
bsd-3-clause
Python
9dca7838d8fb495acc02241b55a30870b7eec0ba
fix flake error in apps.py
justquick/django-activity-stream,pombredanne/django-activity-stream,justquick/django-activity-stream,jrsupplee/django-activity-stream,thelabnyc/django-activity-stream,jrsupplee/django-activity-stream,pombredanne/django-activity-stream,thelabnyc/django-activity-stream
actstream/apps.py
actstream/apps.py
from django.core.exceptions import ImproperlyConfigured

from actstream import settings
from actstream.signals import action
from actstream.compat_apps import AppConfig


class ActstreamConfig(AppConfig):
    name = 'actstream'

    def ready(self):
        from actstream.actions import action_handler
        action.connect(action_handler, dispatch_uid='actstream.models')
        action_class = self.get_model('action')

        if settings.USE_JSONFIELD:
            try:
                from jsonfield.fields import JSONField
            except ImportError:
                raise ImproperlyConfigured(
                    'You must have django-jsonfield installed '
                    'if you wish to use a JSONField on your actions'
                )
            JSONField(blank=True, null=True).contribute_to_class(action_class, 'data')
from django.core.exceptions import ImproperlyConfigured

from actstream import settings
from actstream.signals import action
from actstream.compat_apps import AppConfig


class ActstreamConfig(AppConfig):
    name = 'actstream'

    def ready(self):
        from actstream.actions import action_handler
        action.connect(action_handler, dispatch_uid='actstream.models')
        action_class = self.get_model('action')

        if settings.USE_JSONFIELD:
            try:
                from jsonfield.fields import JSONField
            except ImportError:
                raise ImproperlyConfigured('You must have django-jsonfield installed '
                                           'if you wish to use a JSONField on your actions')
            JSONField(blank=True, null=True).contribute_to_class(action_class, 'data')
bsd-3-clause
Python
d8099cd712279afa1c4e73989c7f03bc9de6dd4c
fix performance problem with historian
genome/flow-workflow,genome/flow-workflow,genome/flow-workflow
flow_workflow/historian/operation_data.py
flow_workflow/historian/operation_data.py
import json


class OperationData(object):
    def __init__(self, net_key, operation_id, color):
        self.net_key = net_key
        self.operation_id = int(operation_id)
        self.color = int(color)

    def dumps(self):
        return json.dumps(self.to_dict, sort_keys=True)

    @classmethod
    def loads(cls, string):
        return cls.from_dict(json.loads(string))

    @property
    def to_dict(self):
        return {
            'net_key': str(self.net_key),
            'operation_id': self.operation_id,
            'color': self.color
        }

    @classmethod
    def from_dict(cls, operation_data_dict):
        return cls(**operation_data_dict)

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "OperationData(net_key='%s', operation_id=%s, color=%s)" % (
                self.net_key, self.operation_id, self.color)

    def __eq__(self, other):
        return self.to_dict == other.to_dict
import json


class OperationData(object):
    def __init__(self, net_key, operation_id, color):
        self.net_key = net_key
        self.operation_id = int(operation_id)
        self.color = int(color)

    def dumps(self):
        return json.dumps(self.to_dict, sort_keys=True)

    @classmethod
    def loads(cls, string):
        return cls.from_dict(json.loads(string))

    @property
    def to_dict(self):
        return {
            'net_key': self.net_key,
            'operation_id': self.operation_id,
            'color': self.color
        }

    @classmethod
    def from_dict(cls, operation_data_dict):
        return cls(**operation_data_dict)

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "OperationData(net_key='%s', operation_id=%s, color=%s)" % (
                self.net_key, self.operation_id, self.color)

    def __eq__(self, other):
        return self.to_dict == other.to_dict
agpl-3.0
Python
71554067936e2355658e6e566e8fcb4a66f24ee7
Add new keyfile
Code4SA/mma-dexter,Code4SA/mma-dexter,Code4SA/mma-dexter
dexter/config/celeryconfig.py
dexter/config/celeryconfig.py
from celery.schedules import crontab

# uses AWS creds from the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables
BROKER_URL = 'sqs://'
BROKER_TRANSPORT_OPTIONS = {
    'region': 'eu-west-1',
    'polling_interval': 15 * 1,
    'queue_name_prefix': 'mma-dexter-',
    'visibility_timeout': 3600*12,
}

# all our tasks can by retried if the worker fails
CELERY_ACKS_LATE = True

CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'Africa/Johannesburg'
CELERY_ENABLE_UTC = True

CELERYBEAT_SCHEDULE = {
    'fetch-yesterdays-feeds': {
        'schedule': crontab(hour=1, minute=0),
        'task': 'dexter.tasks.fetch_yesterdays_feeds',
    },
    'back-process-feeds': {
        'schedule': crontab(hour=11, minute=0),
        'task': 'dexter.tasks.back_process_feeds',
    },
    'fetch_yesterdays_feeds_rerun': {
        'schedule': crontab(hour=12, minute=0),
        'task': 'dexter.tasks.back_process_feeds',
    },
    # 'backfill-taxonomies': {
    #     'schedule': crontab(hour=21, minute=0),
    #     'task': 'dexter.tasks.backfill_taxonomies',
    # },
}
from celery.schedules import crontab

# uses AWS creds from the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables
BROKER_URL = 'sqs://'
BROKER_TRANSPORT_OPTIONS = {
    'region': 'eu-west-1',
    'polling_interval': 15 * 1,
    'queue_name_prefix': 'mma-dexter-',
    'visibility_timeout': 3600*12,
}

# all our tasks can by retried if the worker fails
CELERY_ACKS_LATE = True

CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'Africa/Johannesburg'
CELERY_ENABLE_UTC = True

CELERYBEAT_SCHEDULE = {
    'fetch-yesterdays-feeds': {
        'schedule': crontab(hour=3, minute=0),
        'task': 'dexter.tasks.fetch_yesterdays_feeds',
    },
    'back-process-feeds': {
        'schedule': crontab(hour=11, minute=0),
        'task': 'dexter.tasks.back_process_feeds',
    },
    fetch_yesterdays_feeds_rerun: {
        'schedule': crontab(hour=12, minute=0),
        'task': 'dexter.tasks.back_process_feeds',
    },
    # 'backfill-taxonomies': {
    #     'schedule': crontab(hour=21, minute=0),
    #     'task': 'dexter.tasks.backfill_taxonomies',
    # },
}
apache-2.0
Python
a0b1948261555b724e9c72558a7ca18d793f4748
Support Ticket - In response to fix
Tejal011089/Medsyn2_app,Tejal011089/paypal_erpnext,Yellowen/Owrang,Suninus/erpnext,gangadharkadam/saloon_erp,rohitwaghchaure/New_Theme_Erp,mbauskar/phrerp,suyashphadtare/vestasi-erp-jan-end,Tejal011089/osmosis_erpnext,meisterkleister/erpnext,ThiagoGarciaAlves/erpnext,suyashphadtare/vestasi-erp-final,mbauskar/omnitech-erpnext,gangadhar-kadam/verve-erp,mbauskar/phrerp,rohitwaghchaure/New_Theme_Erp,SPKian/Testing,BhupeshGupta/erpnext,gangadharkadam/saloon_erp,gangadharkadam/office_erp,pombredanne/erpnext,hatwar/Das_erpnext,Tejal011089/huntercamp_erpnext,Tejal011089/digitales_erpnext,gangadharkadam/v4_erp,Tejal011089/paypal_erpnext,Tejal011089/fbd_erpnext,rohitwaghchaure/erpnext_smart,gangadharkadam/v5_erp,suyashphadtare/vestasi-erp-jan-end,gangadharkadam/office_erp,suyashphadtare/sajil-erp,gangadharkadam/verveerp,MartinEnder/erpnext-de,gangadharkadam/v6_erp,susuchina/ERPNEXT,ThiagoGarciaAlves/erpnext,suyashphadtare/vestasi-update-erp,indictranstech/vestasi-erpnext,rohitwaghchaure/GenieManager-erpnext,suyashphadtare/vestasi-erp-1,Tejal011089/med2-app,Tejal011089/huntercamp_erpnext,gangadhar-kadam/verve_live_erp,sagar30051991/ozsmart-erp,rohitwaghchaure/GenieManager-erpnext,BhupeshGupta/erpnext,rohitwaghchaure/digitales_erpnext,indictranstech/focal-erpnext,indictranstech/erpnext,gangadhar-kadam/sms-erpnext,saurabh6790/test-med-app,gangadhar-kadam/helpdesk-erpnext,geekroot/erpnext,gangadharkadam/letzerp,gangadharkadam/sher,fuhongliang/erpnext,saurabh6790/trufil_app,fuhongliang/erpnext,suyashphadtare/sajil-final-erp,rohitwaghchaure/erpnext_smart,dieface/erpnext,saurabh6790/medsyn-app1,suyashphadtare/vestasi-update-erp,saurabh6790/test-erp,suyashphadtare/sajil-final-erp,mbauskar/helpdesk-erpnext,Drooids/erpnext,pawaranand/phrerp,mbauskar/phrerp,saurabh6790/omnisys-app,treejames/erpnext,gmarke/erpnext,gangadhar-kadam/helpdesk-erpnext,Aptitudetech/ERPNext,mbauskar/Das_Erpnext,indictranstech/fbd_erpnext,meisterkleister/erpnext,gangadhar-kadam/powapp,Drooids/erpnext,gangadharkadam/contributionerp,saurabh6790/omnisys-app,gangadharkadam/letzerp,gangadhar-kadam/sapphire_app,njmube/erpnext,sheafferusa/erpnext,anandpdoshi/erpnext,gangadhar-kadam/latestchurcherp,gangadharkadam/letzerp,gangadharkadam/sterp,mbauskar/Das_Erpnext,mbauskar/alec_frappe5_erpnext,indictranstech/Das_Erpnext,gangadhar-kadam/sapphire_app,indictranstech/focal-erpnext,Tejal011089/fbd_erpnext,njmube/erpnext,saurabh6790/test_final_med_app,Tejal011089/paypal_erpnext,gangadharkadam/v6_erp,Tejal011089/digitales_erpnext,hanselke/erpnext-1,mbauskar/helpdesk-erpnext,netfirms/erpnext,suyashphadtare/sajil-final-erp,ShashaQin/erpnext,rohitwaghchaure/New_Theme_Erp,gangadharkadam/contributionerp,indictranstech/internal-erpnext,njmube/erpnext,SPKian/Testing2,rohitwaghchaure/erpnext-receipher,gangadhar-kadam/powapp,gangadharkadam/vlinkerp,indictranstech/internal-erpnext,rohitwaghchaure/GenieManager-erpnext,ShashaQin/erpnext,ThiagoGarciaAlves/erpnext,Suninus/erpnext,indictranstech/Das_Erpnext,indictranstech/focal-erpnext,hatwar/Das_erpnext,rohitwaghchaure/New_Theme_Erp,anandpdoshi/erpnext,Tejal011089/trufil-erpnext,sagar30051991/ozsmart-erp,mbauskar/omnitech-erpnext,hatwar/focal-erpnext,rohitwaghchaure/erpnext-receipher,geekroot/erpnext,Tejal011089/huntercamp_erpnext,gangadharkadam/smrterp,SPKian/Testing2,shitolepriya/test-erp,hanselke/erpnext-1,saurabh6790/ON-RISAPP,indictranstech/focal-erpnext,saurabh6790/omnit-app,mbauskar/helpdesk-erpnext,Tejal011089/huntercamp_erpnext,gsnbng/erpnext,Suninus/erpnext,gsnbng/erpnext,gangadhar-kadam/verve_erp,mbauskar/
omnitech-demo-erpnext,indictranstech/phrerp,saurabh6790/omnit-app,hatwar/focal-erpnext,hernad/erpnext,SPKian/Testing,indictranstech/fbd_erpnext,pombredanne/erpnext,ShashaQin/erpnext,Tejal011089/fbd_erpnext,netfirms/erpnext,saurabh6790/medsynaptic1-app,gangadharkadam/vlinkerp,indictranstech/reciphergroup-erpnext,mbauskar/sapphire-erpnext,mbauskar/omnitech-demo-erpnext,gangadharkadam/saloon_erp,mbauskar/omnitech-erpnext,saurabh6790/omn-app,rohitwaghchaure/erpnext-receipher,gmarke/erpnext,indictranstech/buyback-erp,SPKian/Testing,gangadharkadam/saloon_erp_install,gangadhar-kadam/hrerp,saurabh6790/test_final_med_app,gangadhar-kadam/adb-erp,gsnbng/erpnext,indictranstech/phrerp,indictranstech/biggift-erpnext,gangadharkadam/verveerp,saurabh6790/aimobilize,MartinEnder/erpnext-de,gangadhar-kadam/sms-erpnext,indictranstech/erpnext,saurabh6790/omni-apps,dieface/erpnext,gangadhar-kadam/mtn-erpnext,saurabh6790/aimobilize,mbauskar/omnitech-demo-erpnext,saurabh6790/medsyn-app,saurabh6790/medsynaptic1-app,aruizramon/alec_erpnext,rohitwaghchaure/digitales_erpnext,suyashphadtare/vestasi-erp-jan-end,BhupeshGupta/erpnext,Tejal011089/osmosis_erpnext,indictranstech/Das_Erpnext,rohitwaghchaure/erpnext_smart,gangadhar-kadam/verve-erp,rohitwaghchaure/erpnext-receipher,indictranstech/tele-erpnext,saurabh6790/test-erp,tmimori/erpnext,shitolepriya/test-erp,saurabh6790/aimobilize-app-backup,geekroot/erpnext,dieface/erpnext,saurabh6790/medapp,indictranstech/tele-erpnext,MartinEnder/erpnext-de,mbauskar/sapphire-erpnext,gangadhar-kadam/verve_test_erp,gangadhar-kadam/smrterp,gangadhar-kadam/nassimapp,ShashaQin/erpnext,gangadharkadam/vlinkerp,saurabh6790/medsynaptic-app,gangadharkadam/johnerp,gangadhar-kadam/laganerp,pawaranand/phrerp,gangadharkadam/v6_erp,sheafferusa/erpnext,gangadharkadam/contributionerp,netfirms/erpnext,indictranstech/Das_Erpnext,gangadharkadam/v4_erp,saurabh6790/aimobilize-app-backup,mbauskar/alec_frappe5_erpnext,mahabuber/erpnext,shft117/SteckerApp,mahabuber/erpnext,sheafferusa/erpnext,rohitwaghchaure/digitales_erpnext,indictranstech/trufil-erpnext,suyashphadtare/vestasi-erp-final,indictranstech/reciphergroup-erpnext,hernad/erpnext,mbauskar/helpdesk-erpnext,indictranstech/biggift-erpnext,suyashphadtare/vestasi-erp-jan-end,gangadhar-kadam/powapp,Tejal011089/osmosis_erpnext,saurabh6790/pow-app,indictranstech/buyback-erp,BhupeshGupta/erpnext,gangadharkadam/sher,gangadhar-kadam/prjapp,gangadhar-kadam/latestchurcherp,Tejal011089/digitales_erpnext,gangadharkadam/v6_erp,shft117/SteckerApp,Tejal011089/fbd_erpnext,saurabh6790/omn-app,indictranstech/vestasi-erpnext,hatwar/buyback-erpnext,gangadhar-kadam/sapphire_app,shitolepriya/test-erp,njmube/erpnext,susuchina/ERPNEXT,treejames/erpnext,saurabh6790/tru_app_back,indictranstech/phrerp,gangadhar-kadam/verve_test_erp,gangadhar-kadam/latestchurcherp,indictranstech/reciphergroup-erpnext,suyashphadtare/vestasi-erp-1,gangadharkadam/tailorerp,saurabh6790/medsyn-app1,suyashphadtare/vestasi-erp-1,gangadhar-kadam/church-erpnext,gangadhar-kadam/laganerp,hatwar/buyback-erpnext,mbauskar/internal-hr,gangadhar-kadam/verve-erp,gangadhar-kadam/prjapp,indictranstech/buyback-erp,suyashphadtare/test,hatwar/focal-erpnext,saurabh6790/test-erp,indictranstech/tele-erpnext,ThiagoGarciaAlves/erpnext,suyashphadtare/test,shft117/SteckerApp,gangadhar-kadam/adb-erp,pombredanne/erpnext,gangadhar-kadam/latestchurcherp,gangadhar-kadam/smrterp,pawaranand/phrerp,saurabh6790/pow-app,mbauskar/internal-hr,hatwar/buyback-erpnext,saurabh6790/OFF-RISAPP,mahabuber/erpnext,4commerce-technologies-AG/erpnex
t,indictranstech/trufil-erpnext,Tejal011089/osmosis_erpnext,susuchina/ERPNEXT,mbauskar/Das_Erpnext,gangadharkadam/office_erp,mbauskar/alec_frappe5_erpnext,gangadhar-kadam/nassimapp,saurabh6790/tru_app_back,gangadhar-kadam/helpdesk-erpnext,treejames/erpnext,gangadhar-kadam/verve_live_erp,suyashphadtare/sajil-erp,indictranstech/fbd_erpnext,meisterkleister/erpnext,indictranstech/buyback-erp,indictranstech/reciphergroup-erpnext,MartinEnder/erpnext-de,SPKian/Testing2,indictranstech/osmosis-erpnext,gangadharkadam/v5_erp,hatwar/focal-erpnext,Tejal011089/Medsyn2_app,SPKian/Testing2,mbauskar/omnitech-erpnext,tmimori/erpnext,gangadhar-kadam/verve_test_erp,mbauskar/internal-hr,saurabh6790/OFF-RISAPP,indictranstech/fbd_erpnext,gangadharkadam/verveerp,saurabh6790/trufil_app,susuchina/ERPNEXT,gangadhar-kadam/verve_live_erp,indictranstech/osmosis-erpnext,gmarke/erpnext,gangadharkadam/saloon_erp_install,gangadharkadam/sterp,saurabh6790/test-med-app,rohitwaghchaure/GenieManager-erpnext,gangadharkadam/saloon_erp_install,hatwar/Das_erpnext,pombredanne/erpnext,gangadharkadam/saloon_erp_install,suyashphadtare/gd-erp,anandpdoshi/erpnext,gangadhar-kadam/mtn-erpnext,mbauskar/sapphire-erpnext,4commerce-technologies-AG/erpnext,suyashphadtare/test,fuhongliang/erpnext,sheafferusa/erpnext,4commerce-technologies-AG/erpnext,pawaranand/phrerp,gangadharkadam/vlinkerp,saurabh6790/medsynaptic-app,tmimori/erpnext,hernad/erpnext,Tejal011089/trufil-erpnext,hatwar/Das_erpnext,indictranstech/biggift-erpnext,saurabh6790/alert-med-app,Tejal011089/paypal_erpnext,gangadharkadam/verveerp,saurabh6790/ON-RISAPP,gangadhar-kadam/hrerp,aruizramon/alec_erpnext,gangadharkadam/contributionerp,indictranstech/erpnext,geekroot/erpnext,hanselke/erpnext-1,gangadhar-kadam/verve_live_erp,suyashphadtare/vestasi-erp-final,aruizramon/alec_erpnext,gangadharkadam/saloon_erp,gangadhar-kadam/church-erpnext,gangadharkadam/tailorerp,suyashphadtare/vestasi-update-erp,shitolepriya/test-erp,Tejal011089/trufil-erpnext,indictranstech/erpnext,gangadhar-kadam/mic-erpnext,indictranstech/trufil-erpnext,saurabh6790/alert-med-app,gangadhar-kadam/verve_test_erp,hernad/erpnext,gangadharkadam/v5_erp,gsnbng/erpnext,saurabh6790/test-erp,suyashphadtare/gd-erp,saurabh6790/omnitech-apps,indictranstech/vestasi-erpnext,netfirms/erpnext,saurabh6790/med_new_app,sagar30051991/ozsmart-erp,Drooids/erpnext,sagar30051991/ozsmart-erp,saurabh6790/med_new_app,mbauskar/phrerp,mahabuber/erpnext,gangadharkadam/smrterp,saurabh6790/med_app_rels,anandpdoshi/erpnext,dieface/erpnext,Suninus/erpnext,mbauskar/Das_Erpnext,gmarke/erpnext,Tejal011089/digitales_erpnext,indictranstech/biggift-erpnext,saurabh6790/omnitech-apps,Yellowen/Owrang,indictranstech/trufil-erpnext,meisterkleister/erpnext,indictranstech/internal-erpnext,shft117/SteckerApp,gangadharkadam/letzerp,saurabh6790/omni-apps,saurabh6790/med_app_rels,mbauskar/omnitech-demo-erpnext,gangadharkadam/johnerp,hanselke/erpnext-1,gangadhar-kadam/verve_erp,gangadhar-kadam/laganerp,indictranstech/phrerp,gangadharkadam/v4_erp,indictranstech/tele-erpnext,mbauskar/sapphire-erpnext,gangadhar-kadam/verve_erp,indictranstech/osmosis-erpnext,mbauskar/alec_frappe5_erpnext,suyashphadtare/sajil-erp,indictranstech/osmosis-erpnext,saurabh6790/medapp,Tejal011089/med2-app,gangadharkadam/v4_erp,gangadhar-kadam/verve_erp,gangadhar-kadam/mic-erpnext,Drooids/erpnext,gangadhar-kadam/helpdesk-erpnext,gangadharkadam/v5_erp,aruizramon/alec_erpnext,indictranstech/internal-erpnext,indictranstech/vestasi-erpnext,rohitwaghchaure/digitales_erpnext,fuhongliang/erpnext,SPKian/Tes
ting,suyashphadtare/gd-erp,suyashphadtare/gd-erp,saurabh6790/medsyn-app,hatwar/buyback-erpnext,Tejal011089/trufil-erpnext,tmimori/erpnext,treejames/erpnext
erpnext/support/doctype/support_ticket/support_ticket.py
erpnext/support/doctype/support_ticket/support_ticket.py
import webnotes
from webnotes.model.doc import make_autoname
from utilities.transaction_base import TransactionBase
from home import update_feed

class DocType(TransactionBase):
    def __init__(self, doc, doclist=[]):
        self.doc = doc
        self.doclist = doclist

    def autoname(self):
        self.doc.name = make_autoname(self.doc.naming_series+'.#####')

    def send_response(self):
        """
        Adds a new response to the ticket and sends an email to the sender
        """
        if not self.doc.new_response:
            webnotes.msgprint("Please write something as a response", raise_exception=1)

        subject = '[' + self.doc.name + '] ' + self.doc.subject
        response = self.doc.new_response + '\n\n[Please do not change the subject while responding.]'

        # add last response to new response
        response += self.last_response()

        signature = webnotes.conn.get_value('Email Settings',None,'support_signature')
        if signature:
            response += '\n\n' + signature

        from webnotes.utils.email_lib import sendmail
        sendmail(\
            recipients = [self.doc.raised_by], \
            sender=webnotes.conn.get_value('Email Settings',None,'support_email'), \
            subject=subject, \
            msg=response)

        self.doc.new_response = None
        webnotes.conn.set(self.doc,'status','Waiting for Customer')
        self.make_response_record(response)

    def last_response(self):
        """return last response"""
        tmp = webnotes.conn.sql("""select mail from `tabSupport Ticket Response`
            where parent = %s order by creation desc limit 1""", self.doc.name)
        if not tmp:
            tmp = webnotes.conn.sql("""SELECT description from `tabSupport Ticket`
                where name = %s""", self.doc.name)
        return '\n\n=== In response to ===\n\n' + tmp[0][0]

    def make_response_record(self, response, from_email = None, content_type='text/plain'):
        """
        Creates a new Support Ticket Response record
        """
        # add to Support Ticket Response
        from webnotes.model.doc import Document
        d = Document('Support Ticket Response')
        d.from_email = from_email or webnotes.user.name
        d.parent = self.doc.name
        d.mail = response
        d.content_type = content_type
        d.save(1)

    def close_ticket(self):
        webnotes.conn.set(self.doc,'status','Closed')
        update_feed(self.doc)

    def reopen_ticket(self):
        webnotes.conn.set(self.doc,'status','Open')
        update_feed(self.doc)
import webnotes
from webnotes.model.doc import make_autoname
from utilities.transaction_base import TransactionBase
from home import update_feed

class DocType(TransactionBase):
    def __init__(self, doc, doclist=[]):
        self.doc = doc
        self.doclist = doclist

    def autoname(self):
        self.doc.name = make_autoname(self.doc.naming_series+'.#####')

    def send_response(self):
        """
        Adds a new response to the ticket and sends an email to the sender
        """
        if not self.doc.new_response:
            webnotes.msgprint("Please write something as a response", raise_exception=1)

        subject = '[' + self.doc.name + '] ' + self.doc.subject
        response = self.doc.new_response + '\n\n[Please do not change the subject while responding.]'

        # add last response to new response
        response += self.last_response()

        signature = webnotes.conn.get_value('Email Settings',None,'support_signature')
        if signature:
            response += '\n\n' + signature

        from webnotes.utils.email_lib import sendmail
        sendmail(\
            recipients = [self.doc.raised_by], \
            sender=webnotes.conn.get_value('Email Settings',None,'support_email'), \
            subject=subject, \
            msg=response)

        self.doc.new_response = None
        webnotes.conn.set(self.doc,'status','Waiting for Customer')
        self.make_response_record(response)

    def last_response(self):
        """return last response"""
        tmp = webnotes.conn.sql("""select mail from `tabSupport Ticket Response`
            where parent = %s order by creation desc limit 1""", self.doc.name)
        return '\n\n=== In response to ===\n\n' + tmp[0][0]

    def make_response_record(self, response, from_email = None, content_type='text/plain'):
        """
        Creates a new Support Ticket Response record
        """
        # add to Support Ticket Response
        from webnotes.model.doc import Document
        d = Document('Support Ticket Response')
        d.from_email = from_email or webnotes.user.name
        d.parent = self.doc.name
        d.mail = response
        d.content_type = content_type
        d.save(1)

    def close_ticket(self):
        webnotes.conn.set(self.doc,'status','Closed')
        update_feed(self.doc)

    def reopen_ticket(self):
        webnotes.conn.set(self.doc,'status','Open')
        update_feed(self.doc)
agpl-3.0
Python
30e44e48bacb1403d4df96df0654bdade324ec3e
Add test for `get_current_shift`
mimischi/django-clock,mimischi/django-clock,mimischi/django-clock,mimischi/django-clock
clock/shifts/tests/test_utils.py
clock/shifts/tests/test_utils.py
"""Tests for the shift utilities."""
from test_plus import TestCase

from clock.shifts.factories import ShiftFactory, UserFactory
from clock.shifts.models import Shift
from clock.shifts.utils import get_current_shift, get_last_shifts
from clock.contracts.models import Contract


class TestUtils(TestCase):
    """Test the functionality of the shift utilities."""

    def setUp(self):
        self.user = self.make_user()
        self.contract1 = Contract.objects.create(
            employee=self.user, department='Test department', hours='50')

    def test_get_last_shifts(self):
        employee = UserFactory()

        # Function returns `None` when user has no shifts yet
        no_shifts = get_last_shifts(employee)
        self.assertIsNone(no_shifts)

        # Function returns the last 5 shifts per default
        shifts = ShiftFactory.create_batch(10, employee=employee)
        five_shifts = get_last_shifts(employee)
        self.assertEqual(len(five_shifts), 5)
        self.assertIsInstance(five_shifts[0], Shift)
        self.assertEqual(five_shifts[0].employee, employee)

        # Assert we get the correct order, with the latest finished shift first.
        for i, shift in enumerate(five_shifts):
            try:
                self.assertTrue(five_shifts[i].shift_finished >
                                five_shifts[i + 1].shift_finished)
            except IndexError:
                pass

        # Return seven shifts
        seven_shifts = get_last_shifts(employee, count=7)
        self.assertEqual(len(seven_shifts), 7)

        # Return the maximum number of shifts, even if more are requested
        eleven_shifts = get_last_shifts(employee, count=11)
        self.assertEqual(len(eleven_shifts), 10)

        # Make sure we only retrieve finished shifts
        for shift in eleven_shifts:
            self.assertIsNotNone(shift.shift_finished)

    def test_retrieve_current_running_shift(self):
        """Test that we can retrieve the currently running shift."""
        no_shift = get_current_shift(self.user)
        self.assertIsNone(no_shift)

        with self.login(username=self.user.username, password='password'):
            response = self.post(
                'shift:quick_action', data={
                    '_start': True,
                }, follow=True)

        last_shift = get_current_shift(self.user)
        self.assertIsNotNone(last_shift)
        self.assertIsNone(last_shift.shift_finished, '')
"""Tests for the shift utilities."""
from test_plus import TestCase

from clock.shifts.factories import UserFactory, ShiftFactory
from clock.shifts.models import Shift
from clock.shifts.utils import get_last_shifts


class TestUtils(TestCase):
    """Test the functionality of the shift utilities."""

    def test_get_last_shifts(self):
        employee = UserFactory()

        # Function returns `None` when user has no shifts yet
        no_shifts = get_last_shifts(employee)
        self.assertIsNone(no_shifts)

        # Function returns the last 5 shifts per default
        shifts = ShiftFactory.create_batch(10, employee=employee)
        five_shifts = get_last_shifts(employee)
        self.assertEqual(len(five_shifts), 5)
        self.assertIsInstance(five_shifts[0], Shift)
        self.assertEqual(five_shifts[0].employee, employee)

        # Assert we get the correct order, with the latest finished shift first.
        for i, shift in enumerate(five_shifts):
            try:
                self.assertTrue(five_shifts[i].shift_finished >
                                five_shifts[i + 1].shift_finished)
            except IndexError:
                pass

        # Return seven shifts
        seven_shifts = get_last_shifts(employee, count=7)
        self.assertEqual(len(seven_shifts), 7)

        # Return the maximum number of shifts, even if more are requested
        eleven_shifts = get_last_shifts(employee, count=11)
        self.assertEqual(len(eleven_shifts), 10)

        # Make sure we only retrieve finished shifts
        for shift in eleven_shifts:
            self.assertIsNotNone(shift.shift_finished)
mit
Python
2bbf7bc31b0c7372c143e9d8d062302127ddadd8
add __version__ package attribute
SiLab-Bonn/online_monitor
online_monitor/__init__.py
online_monitor/__init__.py
# http://stackoverflow.com/questions/17583443/what-is-the-correct-way-to-share-package-version-with-setup-py-and-the-package
from pkg_resources import get_distribution

__version__ = get_distribution('online_monitor').version
mit
Python
1bc4c7ff0ecd5df9a1874c1f9930e33268c9524d
fix AddonMan
Alexey-T/CudaText,Alexey-T/CudaText,Alexey-T/CudaText,Alexey-T/CudaText,vhanla/CudaText,Alexey-T/CudaText,vhanla/CudaText,vhanla/CudaText,vhanla/CudaText,vhanla/CudaText,vhanla/CudaText,vhanla/CudaText,vhanla/CudaText,Alexey-T/CudaText,vhanla/CudaText,vhanla/CudaText,Alexey-T/CudaText,Alexey-T/CudaText
app/py/cuda_addonman/work_cudatext_updates__fosshub.py
app/py/cuda_addonman/work_cudatext_updates__fosshub.py
import sys
import os
import re
import platform
import tempfile
import webbrowser
import cudatext as app
from .work_remote import *

p = sys.platform
X64 = platform.architecture()[0]=='64bit'

DOWNLOAD_PAGE = 'https://www.fosshub.com/CudaText.html'
TEXT_CPU = 'x64' if X64 else 'x32'
REGEX_GROUP_VER = 1
DOWNLOAD_REGEX = ' href="(https://.+?=cudatext-win-'+TEXT_CPU+'-(.+?)\.zip)"'

def versions_ordered(s1, s2):
    """ compare "1.10.0" and "1.9.0" correctly """
    n1 = list(map(int, s1.split('.')))
    n2 = list(map(int, s2.split('.')))
    return n1<=n2

def check_cudatext():
    if os.name!='nt':
        return

    fn = os.path.join(tempfile.gettempdir(), 'cudatext_download.html')
    app.msg_status('Downloading: '+DOWNLOAD_PAGE, True)
    get_url(DOWNLOAD_PAGE, fn, True)
    app.msg_status('')
    if not os.path.isfile(fn):
        app.msg_status('Cannot download: '+DOWNLOAD_PAGE)
        return

    text = open(fn, encoding='utf8').read()
    items = re.findall(DOWNLOAD_REGEX, text)
    if not items:
        app.msg_status('Cannot find download links')
        return

    items = sorted(items, reverse=True)
    print('Found links:')
    for i in items:
        print(' '+i[0])

    url = items[0][0]
    ver_inet = items[0][REGEX_GROUP_VER]
    ver_local = app.app_exe_version()

    if versions_ordered(ver_inet, ver_local):
        app.msg_box('Latest CudaText is already here.\nLocal: %s\nInternet: %s'
            %(ver_local, ver_inet), app.MB_OK+app.MB_ICONINFO)
        return

    if app.msg_box('CudaText update is available.\nLocal: %s\nInternet: %s\n\nOpen download URL in browser?'
        %(ver_local, ver_inet), app.MB_YESNO+app.MB_ICONINFO) == app.ID_YES:
        webbrowser.open_new_tab(url)
        print('Opened download URL')
import sys
import os
import re
import platform
import tempfile
import webbrowser
import cudatext as app
from .work_remote import *

p = sys.platform
X64 = platform.architecture()[0]=='64bit'

DOWNLOAD_PAGE = 'https://www.fosshub.com/CudaText.html'
TEXT_CPU = 'x64' if X64 else 'x32'
REGEX_GROUP_VER = 1
DOWNLOAD_REGEX = ' href="(https://.+?=cudatext-win-'+TEXT_CPU+'-(.+?)\.zip)"'

def versions_ordered(s1, s2):
    """ compare "1.10.0" and "1.9.0" correctly """
    n1 = list(map(int, s1.split('.')))
    n2 = list(map(int, s2.split('.')))
    return n1<=n2

def check_cudatext():
    fn = os.path.join(tempfile.gettempdir(), 'cudatext_download.html')
    app.msg_status('Downloading: '+DOWNLOAD_PAGE, True)
    get_url(DOWNLOAD_PAGE, fn, True)
    app.msg_status('')
    if not os.path.isfile(fn):
        app.msg_status('Cannot download: '+DOWNLOAD_PAGE)
        return

    text = open(fn, encoding='utf8').read()
    items = re.findall(DOWNLOAD_REGEX, text)
    if not items:
        app.msg_status('Cannot find download links')
        return

    items = sorted(items, reverse=True)
    print('Found links:')
    for i in items:
        print(' '+i[0])

    url = items[0][0]
    ver_inet = items[0][REGEX_GROUP_VER]
    ver_local = app.app_exe_version()

    if versions_ordered(ver_inet, ver_local):
        app.msg_box('Latest CudaText is already here.\nLocal: %s\nInternet: %s'
            %(ver_local, ver_inet), app.MB_OK+app.MB_ICONINFO)
        return

    if app.msg_box('CudaText update is available.\nLocal: %s\nInternet: %s\n\nOpen download URL in browser?'
        %(ver_local, ver_inet), app.MB_YESNO+app.MB_ICONINFO) == app.ID_YES:
        webbrowser.open_new_tab(url)
        print('Opened download URL')
mpl-2.0
Python
34d9375de23384b3a5a777f802e93973ef7c4e60
Fix the ARC test case.
daviddesancho/mdtraj,mdtraj/mdtraj,swails/mdtraj,gph82/mdtraj,daviddesancho/mdtraj,mpharrigan/mdtraj,hainm/mdtraj,hainm/mdtraj,gph82/mdtraj,daviddesancho/mdtraj,mattwthompson/mdtraj,dwhswenson/mdtraj,dwhswenson/mdtraj,kyleabeauchamp/mdtraj,swails/mdtraj,mattwthompson/mdtraj,msultan/mdtraj,mattwthompson/mdtraj,casawa/mdtraj,msultan/mdtraj,ctk3b/mdtraj,kyleabeauchamp/mdtraj,leeping/mdtraj,mpharrigan/mdtraj,ctk3b/mdtraj,leeping/mdtraj,mattwthompson/mdtraj,gph82/mdtraj,tcmoore3/mdtraj,daviddesancho/mdtraj,casawa/mdtraj,msultan/mdtraj,jchodera/mdtraj,hainm/mdtraj,hainm/mdtraj,tcmoore3/mdtraj,jchodera/mdtraj,msultan/mdtraj,leeping/mdtraj,daviddesancho/mdtraj,tcmoore3/mdtraj,jchodera/mdtraj,ctk3b/mdtraj,casawa/mdtraj,swails/mdtraj,ctk3b/mdtraj,mpharrigan/mdtraj,dwhswenson/mdtraj,swails/mdtraj,mpharrigan/mdtraj,rmcgibbo/mdtraj,swails/mdtraj,mdtraj/mdtraj,mpharrigan/mdtraj,casawa/mdtraj,leeping/mdtraj,jchodera/mdtraj,kyleabeauchamp/mdtraj,rmcgibbo/mdtraj,kyleabeauchamp/mdtraj,hainm/mdtraj,kyleabeauchamp/mdtraj,tcmoore3/mdtraj,mdtraj/mdtraj,rmcgibbo/mdtraj,casawa/mdtraj,ctk3b/mdtraj
MDTraj/tests/test_arc.py
MDTraj/tests/test_arc.py
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Lee-Ping Wang
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################

import tempfile, os
import numpy as np
import mdtraj as md
from mdtraj.formats import ArcTrajectoryFile, arc
from mdtraj.formats import PDBTrajectoryFile
from mdtraj.testing import get_fn, eq, DocStringFormatTester

TestDocstrings = DocStringFormatTester(arc, error_on_none=True)

fd, temp = tempfile.mkstemp(suffix='.arc')

def teardown_module(module):
    """remove the temporary file created by tests in this file
    this gets automatically called by nose"""
    os.close(fd)
    os.unlink(temp)

def test_read_0():
    with ArcTrajectoryFile(get_fn('4waters.arc')) as f:
        xyz, leng, ang = f.read()
    with PDBTrajectoryFile(get_fn('4waters.pdb')) as f:
        xyz2 = f.positions
    eq(xyz, xyz2, decimal=3)
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Lee-Ping Wang
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################

import tempfile, os
import numpy as np
import mdtraj as md
from mdtraj.formats import ArcTrajectoryFile, arc
from mdtraj.formats import PDBTrajectoryFile
from mdtraj.testing import get_fn, eq, DocStringFormatTester

TestDocstrings = DocStringFormatTester(arc, error_on_none=True)

fd, temp = tempfile.mkstemp(suffix='.arc')

def teardown_module(module):
    """remove the temporary file created by tests in this file
    this gets automatically called by nose"""
    os.close(fd)
    os.unlink(temp)

def test_read_0():
    with ArcTrajectoryFile(get_fn('4waters.arc')) as f:
        xyz = f.read()
    with PDBTrajectoryFile(get_fn('4waters.pdb')) as f:
        xyz2 = f.positions
    eq(xyz, xyz2, decimal=3)
lgpl-2.1
Python
863d0d28fb26007c448610a845caab39b1451326
Add comparison with TCE output in CCD example
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
docs/examples/ccd.py
docs/examples/ccd.py
"""Automatic derivation of CCD equations.
"""

import urllib.request

from pyspark import SparkConf, SparkContext
from sympy import IndexedBase, Rational

from drudge import PartHoleDrudge, CR, AN

conf = SparkConf().setAppName('CCSD-derivation')
ctx = SparkContext(conf=conf)
dr = PartHoleDrudge(ctx)
p = dr.names

c_ = dr.op[AN]
c_dag = dr.op[CR]

a, b = p.V_dumms[:2]
i, j = p.O_dumms[:2]

t = IndexedBase('t')
dr.set_dbbar_base(t, 2)

doubles = dr.sum(
    (a, p.V), (b, p.V), (i, p.O), (j, p.O),
    Rational(1, 4) * t[a, b, i, j] *
    c_dag[a] * c_dag[b] * c_[j] * c_[i]
)

curr = dr.ham
h_bar = dr.ham
for order in range(0, 4):
    curr = (curr | doubles).simplify() * Rational(1, order + 1)
    h_bar += curr

en_eqn = dr.eval_fermi_vev(h_bar).simplify()

proj = c_dag[i] * c_dag[j] * c_[b] * c_[a]
t2_eqn = dr.eval_fermi_vev(proj * h_bar).simplify()

# Check with the result from TCE.
TCE_BASE_URL = 'http://www.scs.illinois.edu/~sohirata/'
tce_res = [
    dr.parse_tce(
        urllib.request.urlopen(TCE_BASE_URL + i).read().decode(),
        {2: t}
    ).simplify()
    for i in ['ccd_e.out', 'ccd_t2.out']
]

print('Checking with TCE result: ')
print('Energy: ', en_eqn == tce_res[0])
print('T2 amplitude: ', t2_eqn == tce_res[1])
"""Automatic derivation of CCD equations.
"""

import pickle

from pyspark import SparkConf, SparkContext
from sympy import IndexedBase, Rational

from drudge import PartHoleDrudge, CR, AN

conf = SparkConf().setAppName('CCSD-derivation')
ctx = SparkContext(conf=conf)
dr = PartHoleDrudge(ctx)
p = dr.names

c_ = dr.op[AN]
c_dag = dr.op[CR]

a, b = p.V_dumms[:2]
i, j = p.O_dumms[:2]

t = IndexedBase('t')
dr.set_dbbar_base(t, 2)

doubles = dr.sum(
    (a, p.V), (b, p.V), (i, p.O), (j, p.O),
    t[a, b, i, j] *
    c_dag[a] * c_dag[b] * c_[j] * c_[i]
)

curr = dr.ham
h_bar = dr.ham
for i in range(0, 4):
    curr = (curr | doubles).simplify() * Rational(1, i + 1)
    h_bar += curr

en_eqn = dr.eval_fermi_vev(h_bar)

proj = c_dag[i] * c_dag[j] * c_[b] * c_[a]
t2_eqn = dr.eval_fermi_vev(proj * h_bar)

with open('ccd_eqns.pickle') as fp:
    pickle.dump([en_eqn, t2_eqn], fp)
mit
Python
1fd6fdbdd7c0cf3764fa0707692346675273a764
allow underscores before quality suffix
crackwitz/videozeug
mp4mark.py
mp4mark.py
#!/usr/bin/env python2
import os
import sys
import re
import glob
from subprocess import call, Popen, PIPE

files = []
for x in sys.argv[1:]:
    files += glob.glob(x) or ([x] if os.path.exists(x) else [])

#import pdb; pdb.set_trace()

base = None
for vid in files:
    m = re.match(r'(.*)[_-]\d+p[_-]ame?\.mp4$', vid)
    if not m: continue
    base = m.group(1)

if base is None:
    base = re.sub(r'(.*)-ame\.mp4$', r'\1', files[0])

json = base + "-chapters.json"
ffmeta = base + "-chapters.ffmeta"
vtt = base + "-chapters.vtt"
jumplist = base + "-chapters.txt"

mp4select = Popen(['mp4select.py', 'uuid/+16', vid], stdout=PIPE, shell=True)
xmpmarkers = Popen(['xmpmarkers.py', '-'], stdin=mp4select.stdout, stdout=open(json, 'w'), shell=True)
assert xmpmarkers.wait() == 0

call(['ffmeta.py', 'ffmeta', json, ffmeta], shell=True)
call(['ffmeta.py', 'webvtt', json, vtt], shell=True)
call(['ffmeta.py', 'jumplist', json, jumplist], shell=True)

for invid in files:
    outvid = invid.replace('-ame', '')
    assert not os.path.exists(outvid)
    call(['ffmpeg', '-i', invid, '-i', ffmeta, '-c', 'copy', '-movflags', 'faststart', outvid], shell=True)
#!/usr/bin/env python2
import os
import sys
import re
import glob
from subprocess import call, Popen, PIPE

files = []
for x in sys.argv[1:]:
    files += glob.glob(x) or ([x] if os.path.exists(x) else [])

#import pdb; pdb.set_trace()

base = None
for vid in files:
    m = re.match(r'(.*)-\d+p-ame?\.mp4$', vid)
    if not m: continue
    base = m.group(1)

if base is None:
    base = re.sub(r'(.*)-ame\.mp4$', r'\1', files[0])

json = base + "-chapters.json"
ffmeta = base + "-chapters.ffmeta"
vtt = base + "-chapters.vtt"
jumplist = base + "-chapters.txt"

mp4select = Popen(['mp4select.py', 'uuid/+16', vid], stdout=PIPE, shell=True)
xmpmarkers = Popen(['xmpmarkers.py', '-'], stdin=mp4select.stdout, stdout=open(json, 'w'), shell=True)
assert xmpmarkers.wait() == 0

call(['ffmeta.py', 'ffmeta', json, ffmeta], shell=True)
call(['ffmeta.py', 'webvtt', json, vtt], shell=True)
call(['ffmeta.py', 'jumplist', json, jumplist], shell=True)

for invid in files:
    outvid = invid.replace('-ame', '')
    assert not os.path.exists(outvid)
    call(['ffmpeg', '-i', invid, '-i', ffmeta, '-c', 'copy', '-movflags', 'faststart', outvid], shell=True)
mit
Python
b487bad4079773d8537cd46f20164af77e7674fb
change TODO on nice-to-have to avoid triggering code climate
SexualHealthInnovations/callisto-core,SexualHealthInnovations/callisto-core,project-callisto/callisto-core,project-callisto/callisto-core
callisto/delivery/management/commands/find_matches.py
callisto/delivery/management/commands/find_matches.py
import importlib

from django.core.management.base import BaseCommand

from callisto.delivery.report_delivery import PDFMatchReport
from callisto.delivery.matching import find_matches


class Command(BaseCommand):
    help = 'finds matches and sends match reports'

    def add_arguments(self, parser):
        parser.add_argument('report_class', nargs='?', default=None)

    # eventually: add test option that verifies that passed class can be imported & has necessary methods
    # https://github.com/SexualHealthInnovations/callisto-core/issues/56
    def handle(self, *args, **options):
        report_class_name = options['report_class']
        if report_class_name:
            module_name, class_name = report_class_name.rsplit(".", 1)
            ReportClass = getattr(importlib.import_module(module_name), class_name)
        else:
            ReportClass = PDFMatchReport
        find_matches(report_class=ReportClass)
        self.stdout.write('Matching run')
import importlib

from django.core.management.base import BaseCommand

from callisto.delivery.report_delivery import PDFMatchReport
from callisto.delivery.matching import find_matches


class Command(BaseCommand):
    help = 'finds matches and sends match reports'

    def add_arguments(self, parser):
        parser.add_argument('report_class', nargs='?', default=None)

    # TODO: add test option that verifies that passed class can be imported & has necessary methods
    # https://github.com/SexualHealthInnovations/callisto-core/issues/56
    def handle(self, *args, **options):
        report_class_name = options['report_class']
        if report_class_name:
            module_name, class_name = report_class_name.rsplit(".", 1)
            ReportClass = getattr(importlib.import_module(module_name), class_name)
        else:
            ReportClass = PDFMatchReport
        find_matches(report_class=ReportClass)
        self.stdout.write('Matching run')
agpl-3.0
Python
b0f4ebf0cd0999debfdec7a6de972666d28eea98
Update PWM example.
kwagyeman/openmv,openmv/openmv,iabdalkader/openmv,openmv/openmv,kwagyeman/openmv,kwagyeman/openmv,openmv/openmv,iabdalkader/openmv,iabdalkader/openmv,iabdalkader/openmv,kwagyeman/openmv,openmv/openmv
usr/examples/02-Board-Control/pwm_control.py
usr/examples/02-Board-Control/pwm_control.py
# PWM Control Example
#
# This example shows how to do PWM with your OpenMV Cam.

import time
from pyb import Pin, Timer

tim = Timer(4, freq=1000) # Frequency in Hz

# Generate a 1KHz square wave on TIM4 with 50% and 75% duty cycles on channels 1 and 2, respectively.
ch1 = tim.channel(1, Timer.PWM, pin=Pin("P7"), pulse_width_percent=50)
ch2 = tim.channel(2, Timer.PWM, pin=Pin("P8"), pulse_width_percent=75)

while (True):
    time.sleep(1000)
# PWM Control Example
#
# This example shows how to do PWM with your OpenMV Cam.
#
# WARNING: PWM control is... not easy with MicroPython. You have to use
# the correct timer with the correct pins and channels. As for what the
# correct values are - who knows. If you need to change the pins from the
# example below please try out different timer/channel/pin configs.

import pyb, time

t2 = pyb.Timer(1, freq=1000)
ch1 = t2.channel(2, pyb.Timer.PWM, pin=pyb.Pin("P0"))
ch2 = t2.channel(3, pyb.Timer.PWM, pin=pyb.Pin("P1"))

while(True):
    for i in range(100):
        ch1.pulse_width_percent(i)
        ch2.pulse_width_percent(100-i)
        time.sleep(5)
    for i in range(100):
        ch1.pulse_width_percent(100-i)
        ch2.pulse_width_percent(i)
        time.sleep(5)
mit
Python
94af46d678055a667220e1a28da509bf507c91dd
change version
backbohne/docx-xslt
docxxslt/__init__.py
docxxslt/__init__.py
import logging

from . import engines, package

__version__ = '0.1.0'


class DocxXsltTemplate(object):
    """Docx template renderer"""

    main_document = 'word/document.xml'

    def __init__(self, filename):
        self.package = package.Package(filename)
        self.package.read()

    def save(self, filename=None, **kwargs):
        filename = filename or self.package.filename
        engine = kwargs.pop('engine', engines.DefaultEngine)
        context = kwargs.pop('context')
        logger = kwargs.pop('logger', logging.getLogger())

        # read docx XML string
        xml = self.package.get(self.main_document)

        # render XML
        xml = engine(logger=logger).render(xml, context)

        # write docx document
        self.package.update(self.main_document, xml)
        self.package.write(filename)
import logging

from . import engines, package

__version__ = '0.0.2'


class DocxXsltTemplate(object):
    """Docx template renderer"""

    main_document = 'word/document.xml'

    def __init__(self, filename):
        self.package = package.Package(filename)
        self.package.read()

    def save(self, filename=None, **kwargs):
        filename = filename or self.package.filename
        engine = kwargs.pop('engine', engines.DefaultEngine)
        context = kwargs.pop('context')
        logger = kwargs.pop('logger', logging.getLogger())

        # read docx XML string
        xml = self.package.get(self.main_document)

        # render XML
        xml = engine(logger=logger).render(xml, context)

        # write docx document
        self.package.update(self.main_document, xml)
        self.package.write(filename)
mit
Python
ac5e24e691089b11abf8a09af862215751d2f401
duplicate action
Amoki/Amoki-Music,Amoki/Amoki-Music,Amoki/Amoki-Music
music/admin.py
music/admin.py
from django.contrib import admin
from music.models import Music
from player.models import Room
from django.contrib.admin.helpers import ActionForm
from django import forms
from django.db import models

from music.serializers import MusicSerializer


class UpdateActionForm(ActionForm):
    rooms = []
    for room in Room.objects.all():
        tuple = (room.name,room.name)
        rooms.append(tuple)
    nrroom = forms.ChoiceField(required="false",label=" Target Room for duplication",choices=rooms)


class MusicAdmin(admin.ModelAdmin):
    list_display = ('name', 'count', 'music_id', 'source', 'duration', 'last_play', 'thumbnail', 'room', 'timer_start')
    actions = ('add_music','duplicate_music')
    action_form = UpdateActionForm

    def has_add_permission(self, request):
        return False

    def add_music(self, request, queryset):
        for music in queryset:
            # Hack to transform music into dict
            music.room.add_music(**MusicSerializer(music).data)
        return

    def duplicate_music(self,request,queryset):
        nbrroom = request.POST['nrroom']
        roomto = Room.objects.get(name=nbrroom)
        if roomto:
            for music in queryset:
                if roomto.name != music.room.name:
                    new_entry = music
                    new_entry.room=roomto
                    new_entry.id = None
                    new_entry.save()
        return

admin.site.register(Music, MusicAdmin)
from django.contrib import admin
from music.models import Music
from player.models import Room
from django.contrib.admin.helpers import ActionForm
from django import forms
from django.db import models

from music.serializers import MusicSerializer


class UpdateActionForm(ActionForm):
    room = models.CharField(choices=Room.objects.all(),)


class MusicAdmin(admin.ModelAdmin):
    list_display = ('name', 'count', 'music_id', 'source', 'duration', 'last_play', 'thumbnail', 'room', 'timer_start')
    actions = ('add_music','duplicate_music')
    action_form = UpdateActionForm

    def has_add_permission(self, request):
        return False

    def add_music(self, request, queryset):
        for music in queryset:
            # Hack to transform music into dict
            music.room.add_music(**MusicSerializer(music).data)
        return

    def duplicate_music(self,request,queryset):
        room = request.POST['room']
        for music in queryset:
            room.add_music(**MusicSerializer(music).data)
        return

admin.site.register(Music, MusicAdmin)
mit
Python
d23ffcf338162ad78c646a69f91e4ff36c894a05
bump to 0.78.1
efiop/dvc,efiop/dvc,dmpetrov/dataversioncontrol,dmpetrov/dataversioncontrol
dvc/version.py
dvc/version.py
# Used in setup.py, so don't pull any additional dependencies
#
# Based on:
# - https://github.com/python/mypy/blob/master/mypy/version.py
# - https://github.com/python/mypy/blob/master/mypy/git.py
import os
import subprocess

_BASE_VERSION = "0.78.1"


def _generate_version(base_version):
    """Generate a version with information about the git repository"""
    pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    if not _is_git_repo(pkg_dir) or not _have_git():
        return base_version

    if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
        return base_version

    return "{base_version}+{short_sha}{dirty}".format(
        base_version=base_version,
        short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
        dirty=".mod" if _is_dirty(pkg_dir) else "",
    )


def _is_git_repo(dir_path):
    """Is the given directory version-controlled with git?"""
    return os.path.exists(os.path.join(dir_path, ".git"))


def _have_git():
    """Can we run the git executable?"""
    try:
        subprocess.check_output(["git", "--help"])
        return True
    except subprocess.CalledProcessError:
        return False
    except OSError:
        return False


def _is_release(dir_path, base_version):
    try:
        output = subprocess.check_output(
            ["git", "describe", "--tags", "--exact-match"],
            cwd=dir_path,
            stderr=subprocess.STDOUT,
        ).decode("utf-8")
        tag = output.strip()
        return tag == base_version
    except subprocess.CalledProcessError:
        return False


def _git_revision(dir_path):
    """Get the SHA-1 of the HEAD of a git repository."""
    return subprocess.check_output(
        ["git", "rev-parse", "HEAD"], cwd=dir_path
    ).strip()


def _is_dirty(dir_path):
    """Check whether a git repository has uncommitted changes."""
    try:
        subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
        return False
    except subprocess.CalledProcessError:
        return True


__version__ = _generate_version(_BASE_VERSION)
# Used in setup.py, so don't pull any additional dependencies
#
# Based on:
# - https://github.com/python/mypy/blob/master/mypy/version.py
# - https://github.com/python/mypy/blob/master/mypy/git.py
import os
import subprocess

_BASE_VERSION = "0.78.0"


def _generate_version(base_version):
    """Generate a version with information about the git repository"""
    pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    if not _is_git_repo(pkg_dir) or not _have_git():
        return base_version

    if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
        return base_version

    return "{base_version}+{short_sha}{dirty}".format(
        base_version=base_version,
        short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
        dirty=".mod" if _is_dirty(pkg_dir) else "",
    )


def _is_git_repo(dir_path):
    """Is the given directory version-controlled with git?"""
    return os.path.exists(os.path.join(dir_path, ".git"))


def _have_git():
    """Can we run the git executable?"""
    try:
        subprocess.check_output(["git", "--help"])
        return True
    except subprocess.CalledProcessError:
        return False
    except OSError:
        return False


def _is_release(dir_path, base_version):
    try:
        output = subprocess.check_output(
            ["git", "describe", "--tags", "--exact-match"],
            cwd=dir_path,
            stderr=subprocess.STDOUT,
        ).decode("utf-8")
        tag = output.strip()
        return tag == base_version
    except subprocess.CalledProcessError:
        return False


def _git_revision(dir_path):
    """Get the SHA-1 of the HEAD of a git repository."""
    return subprocess.check_output(
        ["git", "rev-parse", "HEAD"], cwd=dir_path
    ).strip()


def _is_dirty(dir_path):
    """Check whether a git repository has uncommitted changes."""
    try:
        subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
        return False
    except subprocess.CalledProcessError:
        return True


__version__ = _generate_version(_BASE_VERSION)
apache-2.0
Python
43a833dec24f4e0a7dc1d8494a5ad1b44113db15
bump to 0.40.0
dmpetrov/dataversioncontrol,efiop/dvc,efiop/dvc,dmpetrov/dataversioncontrol
dvc/version.py
dvc/version.py
# Used in setup.py, so don't pull any additional dependencies
#
# Based on:
# - https://github.com/python/mypy/blob/master/mypy/version.py
# - https://github.com/python/mypy/blob/master/mypy/git.py
import os
import subprocess

_BASE_VERSION = "0.40.0"


def _generate_version(base_version):
    """Generate a version with information about the git repository"""
    pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

    if not _is_git_repo(pkg_dir) or not _have_git():
        return base_version

    if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
        return base_version

    return "{base_version}+{short_sha}{dirty}".format(
        base_version=base_version,
        short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
        dirty=".mod" if _is_dirty(pkg_dir) else "",
    )


def _is_git_repo(dir_path):
    """Is the given directory version-controlled with git?"""
    return os.path.exists(os.path.join(dir_path, ".git"))


def _have_git():
    """Can we run the git executable?"""
    try:
        subprocess.check_output(["git", "--help"])
        return True
    except subprocess.CalledProcessError:
        return False
    except OSError:
        return False


def _is_release(dir_path, base_version):
    try:
        output = subprocess.check_output(
            ["git", "describe", "--tags", "--exact-match"],
            cwd=dir_path,
            stderr=subprocess.STDOUT,
        ).decode("utf-8")
        tag = output.strip()
        return tag == base_version
    except subprocess.CalledProcessError:
        return False


def _git_revision(dir_path):
    """Get the SHA-1 of the HEAD of a git repository."""
    return subprocess.check_output(
        ["git", "rev-parse", "HEAD"], cwd=dir_path
    ).strip()


def _is_dirty(dir_path):
    """Check whether a git repository has uncommitted changes."""
    try:
        subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
        return False
    except subprocess.CalledProcessError:
        return True


__version__ = _generate_version(_BASE_VERSION)
# Used in setup.py, so don't pull any additional dependencies
#
# Based on:
# - https://github.com/python/mypy/blob/master/mypy/version.py
# - https://github.com/python/mypy/blob/master/mypy/git.py
import os
import subprocess

_BASE_VERSION = "0.35.7"


def _generate_version(base_version):
    """Generate a version with information about the git repository"""
    pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

    if not _is_git_repo(pkg_dir) or not _have_git():
        return base_version

    if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
        return base_version

    return "{base_version}+{short_sha}{dirty}".format(
        base_version=base_version,
        short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
        dirty=".mod" if _is_dirty(pkg_dir) else "",
    )


def _is_git_repo(dir_path):
    """Is the given directory version-controlled with git?"""
    return os.path.exists(os.path.join(dir_path, ".git"))


def _have_git():
    """Can we run the git executable?"""
    try:
        subprocess.check_output(["git", "--help"])
        return True
    except subprocess.CalledProcessError:
        return False
    except OSError:
        return False


def _is_release(dir_path, base_version):
    try:
        output = subprocess.check_output(
            ["git", "describe", "--tags", "--exact-match"],
            cwd=dir_path,
            stderr=subprocess.STDOUT,
        ).decode("utf-8")
        tag = output.strip()
        return tag == base_version
    except subprocess.CalledProcessError:
        return False


def _git_revision(dir_path):
    """Get the SHA-1 of the HEAD of a git repository."""
    return subprocess.check_output(
        ["git", "rev-parse", "HEAD"], cwd=dir_path
    ).strip()


def _is_dirty(dir_path):
    """Check whether a git repository has uncommitted changes."""
    try:
        subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
        return False
    except subprocess.CalledProcessError:
        return True


__version__ = _generate_version(_BASE_VERSION)
apache-2.0
Python
df26ac758c6a38f2e0128e3511db009f0764947f
Bump version to 2.0.0b6
rigetticomputing/pyquil
pyquil/__init__.py
pyquil/__init__.py
__version__ = "2.0.0b6"

from pyquil.quil import Program
from pyquil.api import list_quantum_computers, get_qc
__version__ = "2.0.0b6.dev0"

from pyquil.quil import Program
from pyquil.api import list_quantum_computers, get_qc
apache-2.0
Python
eed8d2b6da6e812cf50f83cf6776c8f0fe63d0f2
correct naming of services
makinacorpus/formhub,qlands/onadata,GeoODK/formhub,SEL-Columbia/formhub,ultimateprogramer/formhub,ultimateprogramer/formhub,ehealthafrica-ci/formhub,sounay/flaminggo-test,jomolinare/kobocat,SEL-Columbia/formhub,ehealthafrica-ci/onadata,kobotoolbox/kobocat,hnjamba/onaclone,spatialdev/onadata,GeoODK/onadata,spatialdev/onadata,awemulya/fieldsight-kobocat,eHealthAfrica/onadata,ehealthafrica-ci/formhub,piqoni/onadata,ehealthafrica-ci/onadata,hnjamba/onaclone,smn/onadata,piqoni/onadata,ehealthafrica-ci/formhub,jomolinare/kobocat,GeoODK/onadata,kobotoolbox/kobocat,wesley1001/formhub,makinacorpus/formhub,spatialdev/onadata,awemulya/fieldsight-kobocat,eHealthAfrica/formhub,qlands/onadata,sounay/flaminggo-test,kobotoolbox/kobocat,sounay/flaminggo-test,jomolinare/kobocat,kobotoolbox/kobocat,eHealthAfrica/formhub,ehealthafrica-ci/onadata,awemulya/fieldsight-kobocat,GeoODK/formhub,hnjamba/onaclone,sounay/flaminggo-test,mainakibui/kobocat,wesley1001/formhub,spatialdev/onadata,ultimateprogramer/formhub,ehealthafrica-ci/formhub,eHealthAfrica/onadata,wesley1001/formhub,GeoODK/onadata,GeoODK/formhub,eHealthAfrica/formhub,ehealthafrica-ci/onadata,mainakibui/kobocat,smn/onadata,ultimateprogramer/formhub,piqoni/onadata,piqoni/onadata,mainakibui/kobocat,smn/onadata,GeoODK/onadata,qlands/onadata,wesley1001/formhub,jomolinare/kobocat,awemulya/fieldsight-kobocat,hnjamba/onaclone,eHealthAfrica/onadata,SEL-Columbia/formhub,makinacorpus/formhub,qlands/onadata,smn/onadata,SEL-Columbia/formhub,makinacorpus/formhub,mainakibui/kobocat,eHealthAfrica/formhub
restservice/__init__.py
restservice/__init__.py
SERVICE_CHOICES = ((u'f2dhis2', u'f2dhis2'),(u'generic_json', u'JSON POST'), (u'generic_xml', u'XML POST'),)
SERVICE_CHOICES = ((u'f2dhis2', u'f2dhis2'),(u'json', u'json_exports'),(u'xml', u'xml_exports'),)
bsd-2-clause
Python
1ee501468b07951ccceb263f91d5624f679f0321
Update outputs store setup
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
polyaxon_client/stores/stores/outputs_store.py
polyaxon_client/stores/stores/outputs_store.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

from polyaxon_client.stores.exceptions import PolyaxonStoresException
from polyaxon_client.stores.stores.base_store import Store


class OutputsStore(object):
    """
    A convenient class to store experiment/job outputs to a given/configured store.
    """

    def __init__(self, store=None, outputs_path=None):
        self._outputs_path = outputs_path
        if not store:
            if outputs_path:
                Store.get_store_for_path(path=outputs_path)
            else:
                store = Store.get_store()
        if isinstance(store, Store):
            self._store = store
        else:
            raise PolyaxonStoresException('Received an unrecognised store `{}`.'.format(store))

    def set_store(self, store):
        self._store = store

    def set_outputs_path(self, outputs_path):
        self._outputs_path = outputs_path

    @property
    def store(self):
        return self._store

    @property
    def outputs_path(self):
        return self._outputs_path

    def upload_file(self, filename, **kwargs):
        self.store.upload_file(filename, self.outputs_path, **kwargs)

    def upload_dir(self, dirname, **kwargs):
        self.store.upload_dir(dirname, self.outputs_path, **kwargs)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

from polyaxon_client.stores.exceptions import PolyaxonStoresException
from polyaxon_client.stores.stores.base_store import Store


class OutputsStore(object):
    """
    A convenient class to store experiment/job outputs to a given/configured store.
    """

    def __init__(self, store=None, outputs_path=None):
        self._outputs_path = outputs_path
        store = store or Store.get_store_for_path(path=outputs_path)
        if isinstance(store, Store):
            self._store = store
        else:
            raise PolyaxonStoresException('Received an unrecognised store `{}`.'.format(store))

    def set_store(self, store):
        self._store = store

    def set_outputs_path(self, outputs_path):
        self._outputs_path = outputs_path

    @property
    def store(self):
        return self._store

    @property
    def outputs_path(self):
        return self._outputs_path

    def upload_file(self, filename, **kwargs):
        self.store.upload_file(filename, self.outputs_path, **kwargs)

    def upload_dir(self, dirname, **kwargs):
        self.store.upload_dir(dirname, self.outputs_path, **kwargs)
apache-2.0
Python
c37f1ca0f8fc73da95d32082ed8dfb8967e38a1c
Check status method
dvhbru/dvhb-hybrid
dvhb_hybrid/tests.py
dvhb_hybrid/tests.py
class BaseTestApi:
    """Base class to test API"""

    API_KEY = 'API-KEY'

    def __init__(self, client, user):
        self.client = client
        self.user = user
        self.headers = {'content-type': 'application/json'}

    @staticmethod
    async def check_status(result, response=HTTPOk):
        assert result.status == response.status_code, await result.text()

    @staticmethod
    async def prepare_result(r):
        data = None
        if 'application/json' in r.headers['Content-Type']:
            data = await r.json()
        return r, data
class BaseTestApi:
    """Base class to test API"""

    API_KEY = 'API-KEY'

    def __init__(self, client, user):
        self.client = client
        self.user = user
        self.headers = {'content-type': 'application/json'}

    @staticmethod
    async def prepare_result(r):
        data = None
        if 'application/json' in r.headers['Content-Type']:
            data = await r.json()
        return r, data
mit
Python