Dataset schema:

  commit          stringlengths    40-40
  subject         stringlengths    4-1.73k
  repos           stringlengths    5-127k
  old_file        stringlengths    2-751
  new_file        stringlengths    2-751
  new_contents    stringlengths    1-8.98k
  old_contents    stringlengths    0-6.59k
  license         stringclasses    13 values
  lang            stringclasses    23 values
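The schema preview above does not say how the rows are serialized on disk. A minimal sketch for streaming and inspecting records, assuming they are stored as JSON Lines with exactly the field names listed above (the file name commits.jsonl is hypothetical):

import json

# Hypothetical path; the schema preview does not state where the rows live.
DATA_PATH = "commits.jsonl"

with open(DATA_PATH, encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # 'repos' holds a comma-separated list of forks containing the commit.
        repos = record["repos"].split(",")
        print(record["commit"][:8], record["lang"], record["subject"])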
7f9ca64313fff0716143cf7d56075a565f35d60f
add docstring describing public API (#2140)
tensorflow/tensorboard
tensorboard/plugins/hparams/api.py
tensorboard/plugins/hparams/api.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental public APIs for the HParams plugin.

This module supports a spectrum of use cases, depending on how much
structure you want. In the simplest case, you can simply collect your
hparams into a dict, and use a Keras callback to record them:

>>> from tensorboard.plugins.hparams import api as hp
>>> hparams = {
...     "optimizer": "adam",
...     "fc_dropout": 0.2,
...     "neurons": 128,
...     # ...
... }
>>>
>>> model = model_fn(hparams)
>>> callbacks = [
>>>     tf.keras.callbacks.TensorBoard(logdir),
>>>     hp.KerasCallback(logdir, hparams),
>>> ]
>>> model.fit(..., callbacks=callbacks)

The Keras callback requires that TensorFlow eager execution be enabled.

If not using Keras, use the `hparams` function to write the values
directly:

>>> # In eager mode:
>>> with tf.create_file_writer(logdir).as_default():
...     hp.hparams(hparams)
>>>
>>> # In legacy graph mode:
>>> with tf.compat.v2.create_file_writer(logdir).as_default() as w:
...     sess.run(w.init())
...     sess.run(hp.hparams(hparams))
...     sess.run(w.flush())

To control how hyperparameters and metrics appear in the TensorBoard UI,
you can define `HParam` and `Metric` objects and collect them in an
`Experiment`:

>>> HP_OPTIMIZER = hp.HParam("optimizer")
>>> HP_FC_DROPOUT = hp.HParam(
...     "fc_dropout",
...     display_name="f.c. dropout",
...     description="Dropout rate for fully connected subnet.",
... )
>>> HP_NEURONS = hp.HParam("neurons", description="Neurons per dense layer")
>>>
>>> experiment = hp.Experiment(
...     hparams=[
...         HP_OPTIMIZER,
...         HP_FC_DROPOUT,
...         HP_NEURONS,
...     ],
...     metrics=[
...         hp.Metric("xent", group="validation", display_name="cross-entropy"),
...         hp.Metric("f1", group="validation", display_name="F₁ score"),
...         hp.Metric("loss", group="train", display_name="training loss"),
...     ],
... )

>>> with tf.summary.create_file_writer(base_logdir).as_default():
...     hp.hparams_config(experiment)  # write experiment summary

You can continue to pass a string-keyed dict to the Keras callback or the
`hparams` function, or you can use `HParam` objects as the keys. The
latter approach enables better static analysis: your favorite Python
linter can tell you if you misspell a hyperparameter name, your IDE can
help you find all the places where a hyperparameter is used, etc:

>>> hparams = {
...     HP_OPTIMIZER: "adam",
...     HP_FC_DROPOUT: 0.2,
...     HP_NEURONS: 128,
...     # ...
... }
>>>
>>> model = model_fn(hparams)
>>> callbacks = [
>>>     tf.keras.callbacks.TensorBoard(logdir),
>>>     hp.KerasCallback(logdir, hparams),
>>> ]

Finally, you can choose to annotate your hparam definitions with domain
information:

>>> HP_OPTIMIZER = hp.HParam("optimizer", hp.Discrete(["adam", "sgd"]))
>>> HP_FC_DROPOUT = hp.HParam("fc_dropout", hp.RealInterval(0.1, 0.4))
>>> HP_NEURONS = hp.HParam("neurons", hp.IntInterval(64, 256))

The TensorBoard HParams plugin does not provide tuners, but you can
integrate these domains into your preferred tuning framework if you so
desire. The domains will also be reflected in the TensorBoard UI.

See the `Experiment`, `HParam`, `Metric`, and `KerasCallback` classes for
API specifications. Consult the `hparams_demo.py` script in the
TensorBoard repository for an end-to-end MNIST example.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorboard.plugins.hparams import keras
from tensorboard.plugins.hparams import summary_v2

Discrete = summary_v2.Discrete
Domain = summary_v2.Domain
HParam = summary_v2.HParam
IntInterval = summary_v2.IntInterval
Metric = summary_v2.Metric
RealInterval = summary_v2.RealInterval
hparams = summary_v2.hparams
hparams_pb = summary_v2.hparams_pb
hparams_config = summary_v2.hparams_config
hparams_config_pb = summary_v2.hparams_config_pb
KerasCallback = keras.Callback

del absolute_import
del division
del keras
del print_function
del summary_v2
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental public APIs for the HParams plugin."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorboard.plugins.hparams import keras
from tensorboard.plugins.hparams import summary_v2

Discrete = summary_v2.Discrete
Domain = summary_v2.Domain
HParam = summary_v2.HParam
IntInterval = summary_v2.IntInterval
Metric = summary_v2.Metric
RealInterval = summary_v2.RealInterval
hparams = summary_v2.hparams
hparams_pb = summary_v2.hparams_pb
hparams_config = summary_v2.hparams_config
hparams_config_pb = summary_v2.hparams_config_pb
KerasCallback = keras.Callback

del absolute_import
del division
del keras
del print_function
del summary_v2
apache-2.0
Python
a68bb0d268861d30c26647523991ed215853cdfe
add Reeve post
opencivicdata/scrapers-ca
ca_ab_grande_prairie_county_no_1/people.py
ca_ab_grande_prairie_county_no_1/people.py
from pupa.scrape import Scraper

from utils import lxmlize, CanadianLegislator as Legislator

import re

COUNCIL_PAGE = 'http://www.countygp.ab.ca/EN/main/government/council.html'
REEVE_URL = 'http://www.countygp.ab.ca/EN/main/government/council/reeve-message.html'


class GrandePrairieCountyNo1PersonScraper(Scraper):
    # @todo The Reeve is also a Councillor.
    def get_people(self):
        reeve_page = lxmlize(REEVE_URL)
        reeve_name = reeve_page.xpath('string(//b)').split(',')[0]

        page = lxmlize(COUNCIL_PAGE)
        councillors = page.xpath('//table[@class="table-plain"]/tbody/tr/td[2]')
        for councillor in councillors:
            name = councillor.xpath('./h2')[0].text_content().split(
                'Division')[0].strip()
            district = re.findall(r'(Division [0-9])',
                                  councillor.xpath('./h2')[0].text_content())[0]
            p = Legislator(name=name, post_id=district, role='Councillor')
            if name == reeve_name:
                p.add_committee_membership('Grande Prairie County No. 1',
                                           role='Reeve')
            p.add_source(COUNCIL_PAGE)

            image = councillor.xpath('./preceding-sibling::td//img/@src')[0]
            p.image = image

            address = councillor.xpath('./p[1]')[0].text_content()
            email = councillor.xpath('.//a[contains(@href, "mailto:")]')[0].text_content()
            p.add_contact('address', address, 'legislature')
            p.add_contact('email', email, None)

            numbers = councillor.xpath('./p[2]')[0].text_content().replace('Email: ', '').replace(email, '').split(':')
            for index, number in enumerate(numbers):
                if index == 0:
                    continue
                contact_type = re.findall(r'[A-Za-z]+', numbers[index - 1])[0]
                number = re.findall(r'[0-9]{3}.[0-9]{3}.[0-9]{4}', number)[0].replace('.', '-')
                if contact_type == 'Fax':
                    p.add_contact('fax', number, 'legislature')
                elif contact_type == 'Cell':
                    p.add_contact('cell', number, 'legislature')
                elif contact_type == 'Hm':
                    p.add_contact('voice', number, 'residence')
                else:
                    raise Exception('Unrecognized contact type %s' % contact_type)

            yield p
from pupa.scrape import Scraper

from utils import lxmlize, CanadianLegislator as Legislator

import re

COUNCIL_PAGE = 'http://www.countygp.ab.ca/EN/main/government/council.html'


class GrandePrairieCountyNo1PersonScraper(Scraper):
    # @todo The Reeve is also a Councillor.
    def get_people(self):
        page = lxmlize(COUNCIL_PAGE)
        councillors = page.xpath('//table[@class="table-plain"]/tbody/tr/td[2]')
        for councillor in councillors:
            name = councillor.xpath('./h2')[0].text_content().split('Division')[0]
            district = re.findall(r'(Division [0-9])',
                                  councillor.xpath('./h2')[0].text_content())[0]
            p = Legislator(name=name, post_id=district, role='Councillor')
            p.add_source(COUNCIL_PAGE)

            image = councillor.xpath('./preceding-sibling::td//img/@src')[0]
            p.image = image

            address = councillor.xpath('./p[1]')[0].text_content()
            email = councillor.xpath('.//a[contains(@href, "mailto:")]')[0].text_content()
            p.add_contact('address', address, 'legislature')
            p.add_contact('email', email, None)

            numbers = councillor.xpath('./p[2]')[0].text_content().replace('Email: ', '').replace(email, '').split(':')
            for index, number in enumerate(numbers):
                if index == 0:
                    continue
                contact_type = re.findall(r'[A-Za-z]+', numbers[index - 1])[0]
                number = re.findall(r'[0-9]{3}.[0-9]{3}.[0-9]{4}', number)[0].replace('.', '-')
                if contact_type == 'Fax':
                    p.add_contact('fax', number, 'legislature')
                elif contact_type == 'Cell':
                    p.add_contact('cell', number, 'legislature')
                elif contact_type == 'Hm':
                    p.add_contact('voice', number, 'residence')
                else:
                    raise Exception('Unrecognized contact type %s' % contact_type)

            yield p
mit
Python
9c7dda9f55369109831eb53f4ed1da5fe82cfc7b
Fix test for observation_aggregator
niboshi/chainer,wkentaro/chainer,pfnet/chainer,hvy/chainer,chainer/chainer
tests/chainermn_tests/extensions_tests/test_observation_aggregator.py
tests/chainermn_tests/extensions_tests/test_observation_aggregator.py
import unittest

import numpy as np

import chainer
import chainer.testing
from chainer.training import extension

import chainermn
from chainermn.extensions.observation_aggregator import observation_aggregator


class DummyChain(chainer.Chain):

    def __init__(self):
        super(DummyChain, self).__init__()

    def forward(self, x):
        return chainer.Variable(x, grad=np.array([0]))


class TestObservationAggregator(unittest.TestCase):

    def setUp(self):
        self.communicator = chainermn.create_communicator('naive')

    def test_observation_aggregator(self):
        model = DummyChain()
        comm = self.communicator
        optimizer = chainermn.create_multi_node_optimizer(
            chainer.optimizers.Adam(), self.communicator)
        optimizer.setup(model)

        train = np.random.rand(10, 1)
        train_iter = chainer.iterators.SerialIterator(train,
                                                      batch_size=1,
                                                      repeat=True,
                                                      shuffle=True)

        updater = chainer.training.StandardUpdater(train_iter, optimizer)

        trainer = chainer.training.Trainer(updater, (1, 'epoch'))

        @extension.make_extension(
            trigger=(2, 'iteration'), priority=extension.PRIORITY_WRITER)
        def rank_reporter(trainer):
            trainer.observation['rank'] = comm.rank

        @extension.make_extension(
            trigger=(2, 'iteration'), priority=extension.PRIORITY_READER)
        def aggregated_rank_checker(trainer):
            actual = trainer.observation['rank-aggregated']
            expected = (comm.size - 1) / 2.0
            chainer.testing.assert_allclose(actual, expected)

        trainer.extend(rank_reporter)
        trainer.extend(observation_aggregator(comm, 'rank', 'rank-aggregated'))
        trainer.extend(aggregated_rank_checker)

        trainer.run()
import unittest

import numpy as np

import chainer
import chainer.testing
from chainer.training import extension

import chainermn
from chainermn.extensions.observation_aggregator import observation_aggregator


class DummyChain(chainer.Chain):

    def __init__(self):
        super(DummyChain, self).__init__()

    def forward(self, x):
        return chainer.Variable(x, grad=np.array([0]))


class TestObservationAggregator(unittest.TestCase):

    def setUp(self):
        self.communicator = chainermn.create_communicator('naive')

    def test_observation_aggregator(self):
        model = DummyChain()
        comm = self.communicator
        optimizer = chainermn.create_multi_node_optimizer(
            chainer.optimizers.Adam(), self.communicator)
        optimizer.setup(model)

        train = np.random.rand(10, 1)
        train_iter = chainer.iterators.SerialIterator(train,
                                                      batch_size=1,
                                                      repeat=True,
                                                      shuffle=True)

        updater = chainer.training.StandardUpdater(train_iter, optimizer)

        trainer = chainer.training.Trainer(updater, (1, 'epoch'))

        @extension.make_extension(
            trigger=(2, 'iteration'), priority=extension.PRIORITY_WRITER)
        def rank_reporter(trainer):
            trainer.observation['rank'] = comm.rank

        @extension.make_extension(
            trigger=(2, 'iteration'), priority=extension.PRIORITY_READER)
        def aggregated_rank_checker(trainer):
            actual = trainer.observation['rank-aggregated']
            expected = (comm.size - 1) / 2
            chainer.testing.assert_allclose(actual, expected)

        trainer.extend(rank_reporter)
        trainer.extend(observation_aggregator(comm, 'rank', 'rank-aggregated'))
        trainer.extend(aggregated_rank_checker)

        trainer.run()
mit
Python
c3a251588868ace81e8e4e0bbe29828495d759d9
fix command line arguments
FidoProject/Hardware
ThingThree/Code/Dotstar/strandtest.py
ThingThree/Code/Dotstar/strandtest.py
#!/usr/bin/python

import time, math, sys

from dotstar import Adafruit_DotStar

numPixels = 24
dataPin = 17
clockPin = 27
strip = Adafruit_DotStar(numPixels, dataPin, clockPin)

strip.begin()
strip.setBrightness(255)

def scale(color, brightness):
    str_hex = hex(color)[2:].zfill(6)
    r,g,b = (int(str_hex[2*x:2*x+2],16)*(brightness/255.0) for x in xrange(3))
    return (int(r) << 8) + (int(g) << 16) + int(b)

def pulseFade(color):
    for brightness in range(0,255):
        for i in range(0,numPixels):
            strip.setPixelColor(i, scale(color,brightness))
        strip.show()
        time.sleep(0.01)
    for brightness in range(255,0,-1):
        for i in range(0,numPixels):
            strip.setPixelColor(i, scale(color,brightness))
        strip.show()
        time.sleep(0.001)

def pulseFromMiddle(color):
    for i in range(0,numPixels/2):
        strip.setPixelColor(numPixels/2 + i, color);
        strip.setPixelColor(numPixels/2 - i, color);
        strip.show();
        time.sleep(0.02);
    for i in range(0,numPixels/2):
        strip.setPixelColor(i, 0);
        strip.setPixelColor(numPixels-i, 0);
        strip.show();
        time.sleep(0.02);

def cycle(color=-1):
    head = 0
    tail = -10
    curColor = 0xFF0000 if (color == -1) else color
    while True:
        strip.setPixelColor(head,curColor)
        strip.setPixelColor(tail,0)
        strip.show()
        time.sleep(0.02)
        head += 1
        if (head >= numPixels):
            head = 0
            if (color == -1):
                curColor >>= 8
                if (curColor == 0):
                    curColor = 0xFF0000
        tail += 1
        if (tail >= numPixels):
            tail = 0

def pulseCycle(color, cycles):
    head = 0
    tail = -10
    iters = 0
    while iters < cycles:
        strip.setPixelColor(head,color)
        strip.setPixelColor(tail,0)
        strip.show()
        time.sleep(0.02)
        head += 1
        if (head >= numPixels):
            head = 0
            iters += 1
        tail += 1
        if (tail >= numPixels):
            tail = 0
    while tail <= numPixels:
        strip.setPixelColor(tail,0)
        strip.show()
        time.sleep(0.02)
        tail += 1

def breathe(color):
    while True:
        millis = int(round(time.time() * 1000))
        brightness = (math.exp(math.sin(millis/2000.0*math.pi)) - 0.36787944)*108.0;
        for i in range(0,numPixels):
            strip.setPixelColor(i, scale(color,brightness))
        strip.show()
        time.sleep(0.02)

pulseCycle(int(sys.argv[1],0),int(sys.argv[2]))
#!/usr/bin/python

import time, math, sys

from dotstar import Adafruit_DotStar

numPixels = 24
dataPin = 17
clockPin = 27
strip = Adafruit_DotStar(numPixels, dataPin, clockPin)

strip.begin()
strip.setBrightness(255)

def scale(color, brightness):
    str_hex = hex(color)[2:].zfill(6)
    r,g,b = (int(str_hex[2*x:2*x+2],16)*(brightness/255.0) for x in xrange(3))
    return (int(r) << 8) + (int(g) << 16) + int(b)

def pulseFade(color):
    for brightness in range(0,255):
        for i in range(0,numPixels):
            strip.setPixelColor(i, scale(color,brightness))
        strip.show()
        time.sleep(0.01)
    for brightness in range(255,0,-1):
        for i in range(0,numPixels):
            strip.setPixelColor(i, scale(color,brightness))
        strip.show()
        time.sleep(0.001)

def pulseFromMiddle(color):
    for i in range(0,numPixels/2):
        strip.setPixelColor(numPixels/2 + i, color);
        strip.setPixelColor(numPixels/2 - i, color);
        strip.show();
        time.sleep(0.02);
    for i in range(0,numPixels/2):
        strip.setPixelColor(i, 0);
        strip.setPixelColor(numPixels-i, 0);
        strip.show();
        time.sleep(0.02);

def cycle(color=-1):
    head = 0
    tail = -10
    curColor = 0xFF0000 if (color == -1) else color
    while True:
        strip.setPixelColor(head,curColor)
        strip.setPixelColor(tail,0)
        strip.show()
        time.sleep(0.02)
        head += 1
        if (head >= numPixels):
            head = 0
            if (color == -1):
                curColor >>= 8
                if (curColor == 0):
                    curColor = 0xFF0000
        tail += 1
        if (tail >= numPixels):
            tail = 0

def pulseCycle(color, cycles):
    head = 0
    tail = -10
    iters = 0
    while iters < cycles:
        strip.setPixelColor(head,color)
        strip.setPixelColor(tail,0)
        strip.show()
        time.sleep(0.02)
        head += 1
        if (head >= numPixels):
            head = 0
            iters += 1
        tail += 1
        if (tail >= numPixels):
            tail = 0
    while tail <= numPixels:
        strip.setPixelColor(tail,0)
        strip.show()
        time.sleep(0.02)
        tail += 1

def breathe(color):
    while True:
        millis = int(round(time.time() * 1000))
        brightness = (math.exp(math.sin(millis/2000.0*math.pi)) - 0.36787944)*108.0;
        for i in range(0,numPixels):
            strip.setPixelColor(i, scale(color,brightness))
        strip.show()
        time.sleep(0.02)

pulseCycle(int(sys.argv[0],0),int(sys.argv[1]))
apache-2.0
Python
a171595f029b43af27d14a125e68647e2206c6d5
Update __init__.py
r0h4n/commons,Tendrl/commons
tendrl/commons/objects/node_alert_counters/__init__.py
tendrl/commons/objects/node_alert_counters/__init__.py
from tendrl.commons import objects


class NodeAlertCounters(objects.BaseObject):
    def __init__(
        self,
        warn_count=0,
        node_id=None,
        *args,
        **kwargs
    ):
        super(NodeAlertCounters, self).__init__(*args, **kwargs)
        self.warning_count = warn_count
        self.node_id = node_id
        self.value = '/nodes/{0}/alert_counters'

    def render(self):
        self.value = self.value.format(self.node_id or NS.node_context.node_id)
        return super(NodeAlertCounters, self).render()

    def save(self, *args, **kwargs):
        NS.tendrl.objects.ClusterNodeAlertCounters(
            warn_count=self.warning_count,
            node_id=self.node_id,
            integration_id=NS.tendrl_context.integration_id
        ).save()
        super(NodeAlertCounters, self).save(*args, **kwargs)
from tendrl.commons import objects


class NodeAlertCounters(objects.BaseObject):
    def __init__(
        self,
        warn_count=0,
        node_id=None,
        *args,
        **kwargs
    ):
        super(NodeAlertCounters, self).__init__(*args, **kwargs)
        self.warning_count = warn_count
        self.node_id = node_id
        self.value = '/nodes/{0}/alert_counters'

    def render(self):
        self.value = self.value.format(self.node_id or NS.node_context.node_id)
        return super(NodeAlertCounters, self).render()

    def save(self, *args, **kwargs):
        NS.tendrl.objects.ClusterNodeAlertCounters(
            warn_count=self.warning_count,
            node_id=self.node_id
        ).save()
        super(NodeAlertCounters, self).save(*args, **kwargs)
lgpl-2.1
Python
dd7e0d18a15195cf67af44af8c15918a5cf068e4
add header information
changbinwang/bookinfo
douban_book_api.py
douban_book_api.py
from douban_client.api.error import DoubanAPIError

import requests
import simplejson
from douban_client import DoubanClient

__author__ = 'owen2785'

baseurl = 'https://api.douban.com/v2/book/isbn/'


def getbyisbn_without_auth(isbn):
    r = requests.get(baseurl+str(isbn),headers=headers)
    print r.headers
    print r.request.headers
    return r.json()
from douban_client.api.error import DoubanAPIError

import requests
import simplejson
from douban_client import DoubanClient

__author__ = 'owen2785'

baseurl = 'https://api.douban.com/v2/book/isbn/'


def getbyisbn_without_auth(isbn):
    r = requests.get(baseurl+str(isbn))
    return r.json()
apache-2.0
Python
396ab20874a0c3492482a8ae03fd7d61980917a5
Update closest match adapter docstring.
Reinaesaya/OUIRL-ChatBot,vkosuri/ChatterBot,gunthercox/ChatterBot,Gustavo6046/ChatterBot,maclogan/VirtualPenPal,davizucon/ChatterBot
chatterbot/adapters/logic/closest_match.py
chatterbot/adapters/logic/closest_match.py
# -*- coding: utf-8 -*-
from fuzzywuzzy import fuzz

from .base_match import BaseMatchAdapter


class ClosestMatchAdapter(BaseMatchAdapter):
    """
    The ClosestMatchAdapter logic adapter selects a known response
    to an input by searching for a known statement that most closely
    matches the input based on the Levenshtein Distance between the
    text of each statement.
    """

    def get(self, input_statement):
        """
        Takes a statement string and a list of statement strings.
        Returns the closest matching statement from the list.
        """
        statement_list = self.context.storage.get_response_statements()

        if not statement_list:
            if self.has_storage_context:
                # Use a randomly picked statement
                self.logger.info(
                    u'No statements have known responses. ' +
                    u'Choosing a random response to return.'
                )
                return 0, self.context.storage.get_random()
            else:
                raise self.EmptyDatasetException()

        confidence = -1
        closest_match = input_statement

        # Find the closest matching known statement
        for statement in statement_list:
            ratio = fuzz.ratio(input_statement.text.lower(), statement.text.lower())

            if ratio > confidence:
                confidence = ratio
                closest_match = statement

        # Convert the confidence integer to a percent
        confidence /= 100.0

        return confidence, closest_match
# -*- coding: utf-8 -*-
from fuzzywuzzy import fuzz

from .base_match import BaseMatchAdapter


class ClosestMatchAdapter(BaseMatchAdapter):
    """
    The ClosestMatchAdapter logic adapter creates a response by
    using fuzzywuzzy's process class to extract the most similar
    response to the input.

    This adapter selects a response to an input statement by
    selecting the closest known matching statement based on
    the Levenshtein Distance between the text of each statement.
    """

    def get(self, input_statement):
        """
        Takes a statement string and a list of statement strings.
        Returns the closest matching statement from the list.
        """
        statement_list = self.context.storage.get_response_statements()

        if not statement_list:
            if self.has_storage_context:
                # Use a randomly picked statement
                self.logger.info(
                    u'No statements have known responses. ' +
                    u'Choosing a random response to return.'
                )
                return 0, self.context.storage.get_random()
            else:
                raise self.EmptyDatasetException()

        confidence = -1
        closest_match = input_statement

        # Find the closest matching known statement
        for statement in statement_list:
            ratio = fuzz.ratio(input_statement.text.lower(), statement.text.lower())

            if ratio > confidence:
                confidence = ratio
                closest_match = statement

        # Convert the confidence integer to a percent
        confidence /= 100.0

        return confidence, closest_match
bsd-3-clause
Python
2947fe97d466872de05ada289d9172f41895969c
Update GOV.UK Frontend/Jinja lib test
alphagov/notifications-admin
tests/templates/components/test_radios_with_images.py
tests/templates/components/test_radios_with_images.py
import json
from importlib import metadata

from packaging.version import Version


def test_govuk_frontend_jinja_overrides_on_design_system_v3():
    with open("package.json") as package_file:
        package_json = json.load(package_file)

    govuk_frontend_version = Version(package_json["dependencies"]["govuk-frontend"])
    govuk_frontend_jinja_version = Version(metadata.version("govuk-frontend-jinja"))

    # This should be checking govuk_frontend_version == 3.14.x, but we're not there yet. Update this when we are.
    # Compatibility between these two libs is defined at https://github.com/LandRegistry/govuk-frontend-jinja/
    correct_govuk_frontend_version = Version("3.0.0") <= govuk_frontend_version < Version("4.0.0")
    correct_govuk_frontend_jinja_version = Version("1.5.0") <= govuk_frontend_jinja_version < Version("1.6.0")

    assert correct_govuk_frontend_version and correct_govuk_frontend_jinja_version, (
        "After upgrading either of the Design System packages, you must validate that "
        "`app/templates/govuk_frontend_jinja_overrides/templates/components/*/template.html`"
        "are all structurally-correct and up-to-date macros. If not, update the macros or retire them and update the "
        "rendering process."
    )
import json


def test_govuk_frontend_jinja_overrides_on_design_system_v3():
    with open("package.json") as package_file:
        package_json = json.load(package_file)

    assert package_json["dependencies"]["govuk-frontend"].startswith("3."), (
        "After upgrading the Design System, manually validate that "
        "`app/templates/govuk_frontend_jinja_overrides/templates/components/*/template.html`"
        "are all structurally-correct and up-to-date macros. If not, update the macros or retire them and update the "
        "rendering process."
    )
mit
Python
f4a80c720d0164eb8a942e3ad1b5244d30800e5a
Add --allow-nacl-socket-api for the chromoting functional test.
krieger-od/nwjs_chromium.src,markYoungH/chromium.src,dednal/chromium.src,junmin-zhu/chromium-rivertrail,hujiajie/pa-chromium,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,Chilledheart/chromium,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,robclark/chromium,timopulkkinen/BubbleFish,ChromiumWebApps/chromium,jaruba/chromium.src,ltilve/chromium,PeterWangIntel/chromium-crosswalk,zcbenz/cefode-chromium,mogoweb/chromium-crosswalk,keishi/chromium,Jonekee/chromium.src,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,M4sse/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk,littlstar/chromium.src,ondra-novak/chromium.src
chrome/test/functional/chromoting_basic.py
chrome/test/functional/chromoting_basic.py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os

import pyauto_functional  # Must come before chromoting and pyauto.
import chromoting
import pyauto


class ChromotingBasic(chromoting.ChromotingMixIn, pyauto.PyUITest):
  """Basic tests for Chromoting."""

  _EXTRA_CHROME_FLAGS = [
    '--allow-nacl-socket-api=*',
  ]

  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with some custom flags.

    Overrides the default list of extra flags passed to Chrome.
    See ExtraChromeFlags() in pyauto.py.
    """
    return pyauto.PyUITest.ExtraChromeFlags(self) + self._EXTRA_CHROME_FLAGS

  def setUp(self):
    """Set up test for Chromoting on both local and remote machines.

    Installs the Chromoting app, launches it, and authenticates
    using the default Chromoting test account.
    """
    super(ChromotingBasic, self).setUp()
    self._app = self.InstallExtension(self.GetWebappPath())
    self.LaunchApp(self._app)
    account = self.GetPrivateInfo()['test_chromoting_account']
    self.Authenticate(account['username'], account['password'])

  def testChromoting(self):
    """Verify that we can start and disconnect from a Chromoting session."""
    client_local = (self.remote == None)
    host = self
    client = self if client_local else self.remote
    client_tab_index = 2 if client_local else 1

    access_code = host.Share()
    self.assertTrue(access_code,
                    msg='Host attempted to share, but it failed. '
                        'No access code was found.')

    if client_local:
      client.LaunchApp(self._app)

    self.assertTrue(client.Connect(access_code, client_tab_index),
                    msg='The client attempted to connect to the host, '
                        'but the chromoting session did not start.')

    host.CancelShare()
    client.Disconnect(client_tab_index)


if __name__ == '__main__':
  pyauto_functional.Main()
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os

import pyauto_functional  # Must come before chromoting and pyauto.
import chromoting
import pyauto


class ChromotingBasic(chromoting.ChromotingMixIn, pyauto.PyUITest):
  """Basic tests for Chromoting."""

  def setUp(self):
    """Set up test for Chromoting on both local and remote machines.

    Installs the Chromoting app, launches it, and authenticates
    using the default Chromoting test account.
    """
    super(ChromotingBasic, self).setUp()
    self._app = self.InstallExtension(self.GetWebappPath())
    self.LaunchApp(self._app)
    account = self.GetPrivateInfo()['test_chromoting_account']
    self.Authenticate(account['username'], account['password'])

  def testChromoting(self):
    """Verify that we can start and disconnect from a Chromoting session."""
    client_local = (self.remote == None)
    host = self
    client = self if client_local else self.remote
    client_tab_index = 2 if client_local else 1

    access_code = host.Share()
    self.assertTrue(access_code,
                    msg='Host attempted to share, but it failed. '
                        'No access code was found.')

    if client_local:
      client.LaunchApp(self._app)

    self.assertTrue(client.Connect(access_code, client_tab_index),
                    msg='The client attempted to connect to the host, '
                        'but the chromoting session did not start.')

    host.CancelShare()
    client.Disconnect(client_tab_index)


if __name__ == '__main__':
  pyauto_functional.Main()
bsd-3-clause
Python
0b366a3f4c23b644f885ed649edc577242ae90ee
Fix genreflex rootmap files to not contain stray spaces after "string" Corrsponds to v5-22-00-patches r27408
karies/root,thomaskeck/root,mkret2/root,sawenzel/root,smarinac/root,0x0all/ROOT,mhuwiler/rootauto,zzxuanyuan/root-compressor-dummy,Duraznos/root,krafczyk/root,beniz/root,strykejern/TTreeReader,vukasinmilosevic/root,arch1tect0r/root,sbinet/cxx-root,lgiommi/root,omazapa/root,esakellari/my_root_for_test,evgeny-boger/root,simonpf/root,sirinath/root,alexschlueter/cern-root,abhinavmoudgil95/root,nilqed/root,CristinaCristescu/root,jrtomps/root,omazapa/root-old,agarciamontoro/root,cxx-hep/root-cern,pspe/root,gganis/root,perovic/root,root-mirror/root,ffurano/root5,georgtroska/root,olifre/root,dfunke/root,Dr15Jones/root,davidlt/root,gbitzes/root,tc3t/qoot,veprbl/root,Y--/root,mattkretz/root,bbockelm/root,BerserkerTroll/root,kirbyherm/root-r-tools,buuck/root,zzxuanyuan/root,satyarth934/root,esakellari/root
cint/reflex/python/genreflex/genrootmap.py
cint/reflex/python/genreflex/genrootmap.py
# Copyright CERN, CH-1211 Geneva 23, 2004-2006, All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose is hereby granted without fee, provided that this copyright and
# permissions notice appear in all copies and derivatives.
#
# This software is provided "as is" without express or implied warranty.

import os, sys, string, re

model = """
# This file has been generated by genreflex with the --rootmap option
#--Final End
"""

#----------------------------------------------------------------------------------
def isRootmapVetoed(c) :
  if c.has_key('extra') and 'rootmap' in c['extra'] :
    rootmapsel = c['extra']['rootmap'].lower()
    return (rootmapsel == 'false' or rootmapsel == '0')
  return False

#----------------------------------------------------------------------------------
def genRootMap(mapfile, dicfile, libfile, cnames, classes) :
  startmark = '#--Begin ' + dicfile + '\n'
  endmark   = '#--End ' + dicfile + '\n'
  finalmark = '#--Final End\n'
  transtable = string.maketrans(': ', '@-')
  transtable = string.maketrans(': ', '@-')
  for c in classes :
    c['fullname'] = c.get('fullname', c['name'])
  # filter out classes that were de-selected by rootmap attribute
  cveto = filter( lambda c: isRootmapVetoed(c),classes)
  for cv in cveto :
    cvname = cv['fullname']
    # not all cvname have to be in cnames, cname could have been excluded
    if cvname in cnames:
      cnames.remove(cvname)
  new_lines = []
  if libfile.rfind('/') != -1 : libfile = libfile[libfile.rfind('/')+1:]
  for c in cnames :
    nc = string.translate(str(c), transtable)
    # also remove possible seperator ' ', or set<basic_string<char> > becomes set<string >
    nc = re.sub(r"\bstd@@basic_string<char>-?", 'string', nc)
    nc = re.sub(r"\bstd@@", '', nc)
    nc = nc.replace(' ','')
    new_lines += '%-45s %s\n' % ('Library.' + nc + ':', libfile )
  if not os.path.exists(mapfile) :
    lines = [ line+'\n' for line in model.split('\n')]
  else :
    f = open(mapfile,'r')
    lines = [ line for line in f.readlines()]
    f.close()
  if startmark in lines and endmark in lines :
    lines[lines.index(startmark)+1 : lines.index(endmark)] = new_lines
  else :
    lines[lines.index(finalmark):lines.index(finalmark)] = [startmark]+new_lines+[endmark]
  f = open(mapfile,'w')
  f.writelines(lines)
  f.close()
# Copyright CERN, CH-1211 Geneva 23, 2004-2006, All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose is hereby granted without fee, provided that this copyright and
# permissions notice appear in all copies and derivatives.
#
# This software is provided "as is" without express or implied warranty.

import os, sys, string, re

model = """
# This file has been generated by genreflex with the --rootmap option
#--Final End
"""

#----------------------------------------------------------------------------------
def isRootmapVetoed(c) :
  if c.has_key('extra') and 'rootmap' in c['extra'] :
    rootmapsel = c['extra']['rootmap'].lower()
    return (rootmapsel == 'false' or rootmapsel == '0')
  return False

#----------------------------------------------------------------------------------
def genRootMap(mapfile, dicfile, libfile, cnames, classes) :
  startmark = '#--Begin ' + dicfile + '\n'
  endmark   = '#--End ' + dicfile + '\n'
  finalmark = '#--Final End\n'
  transtable = string.maketrans(': ', '@-')
  transtable = string.maketrans(': ', '@-')
  for c in classes :
    c['fullname'] = c.get('fullname', c['name'])
  # filter out classes that were de-selected by rootmap attribute
  cveto = filter( lambda c: isRootmapVetoed(c),classes)
  for cv in cveto :
    cvname = cv['fullname']
    # not all cvname have to be in cnames, cname could have been excluded
    if cvname in cnames:
      cnames.remove(cvname)
  new_lines = []
  if libfile.rfind('/') != -1 : libfile = libfile[libfile.rfind('/')+1:]
  for c in cnames :
    nc = string.translate(str(c), transtable)
    nc = re.sub(r"\bstd@@basic_string<char>", 'string', nc)
    nc = re.sub(r"\bstd@@", '', nc)
    nc = nc.replace(' ','')
    new_lines += '%-45s %s\n' % ('Library.' + nc + ':', libfile )
  if not os.path.exists(mapfile) :
    lines = [ line+'\n' for line in model.split('\n')]
  else :
    f = open(mapfile,'r')
    lines = [ line for line in f.readlines()]
    f.close()
  if startmark in lines and endmark in lines :
    lines[lines.index(startmark)+1 : lines.index(endmark)] = new_lines
  else :
    lines[lines.index(finalmark):lines.index(finalmark)] = [startmark]+new_lines+[endmark]
  f = open(mapfile,'w')
  f.writelines(lines)
  f.close()
lgpl-2.1
Python
b2e6a7a8df1ede0118838ce494e1679eea0eb578
Decrease cert expiration alerting threshold from 2 years to 1 year. (#1002)
scalyr/scalyr-agent-2
scripts/check-bundled-ca-certs-expirations.py
scripts/check-bundled-ca-certs-expirations.py
#!/usr/bin/env python
# Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script which errors out if any of the bundled certs will expire in 24 months or
sooner.
"""

from __future__ import absolute_import
from __future__ import print_function

if False:
    from typing import List

import os
import glob
import datetime

from io import open

from cryptography import x509
from cryptography.hazmat.backends import default_backend

# By default we fail if any of the bundled cert expires in 1 year or sooner
DEFAULT_EXPIRE_THRESHOLD_TIMEDELTA = datetime.timedelta(days=(12 * 30 * 1))


def fail_if_cert_expires_in_timedelta(cert_path, expire_in_threshold_timedelta):
    # type: (str, datetime.timedelta) -> None
    """
    Fail and throw an exception if the provided certificate expires in the
    provided timedelta period or sooner.
    """
    with open(cert_path, "rb") as fp:
        content = fp.read()

    cert = x509.load_pem_x509_certificate(content, default_backend())

    now_dt = datetime.datetime.utcnow()
    expire_in_days = (cert.not_valid_after - now_dt).days

    if now_dt + expire_in_threshold_timedelta >= cert.not_valid_after:
        raise Exception(
            (
                "Certificate %s will expire in %s days (%s), please update!"
                % (cert_path, expire_in_days, cert.not_valid_after)
            )
        )
    else:
        print(
            "OK - certificate %s will expire in %s days (%s)"
            % (cert_path, expire_in_days, cert.not_valid_after)
        )


def get_bundled_cert_paths():
    # type: () -> List[str]
    """
    Return full absolute paths for all the bundled certs.
    """
    cwd = os.path.abspath(os.getcwd())

    result = []
    for file_name in glob.glob("certs/*"):
        file_path = os.path.join(cwd, file_name)
        result.append(file_path)

    return result


def main():
    cert_paths = get_bundled_cert_paths()

    for cert_path in cert_paths:
        fail_if_cert_expires_in_timedelta(
            cert_path, expire_in_threshold_timedelta=DEFAULT_EXPIRE_THRESHOLD_TIMEDELTA
        )


if __name__ == "__main__":
    main()
#!/usr/bin/env python
# Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script which errors out if any of the bundled certs will expire in 24 months or
sooner.
"""

from __future__ import absolute_import
from __future__ import print_function

if False:
    from typing import List

import os
import glob
import datetime

from io import open

from cryptography import x509
from cryptography.hazmat.backends import default_backend

# By default we fail if any of the bundled cert expires in 2 years or sooner
DEFAULT_EXPIRE_THRESHOLD_TIMEDELTA = datetime.timedelta(days=(12 * 30 * 2))


def fail_if_cert_expires_in_timedelta(cert_path, expire_in_threshold_timedelta):
    # type: (str, datetime.timedelta) -> None
    """
    Fail and throw an exception if the provided certificate expires in the
    provided timedelta period or sooner.
    """
    with open(cert_path, "rb") as fp:
        content = fp.read()

    cert = x509.load_pem_x509_certificate(content, default_backend())

    now_dt = datetime.datetime.utcnow()
    expire_in_days = (cert.not_valid_after - now_dt).days

    if now_dt + expire_in_threshold_timedelta >= cert.not_valid_after:
        raise Exception(
            (
                "Certificate %s will expire in %s days (%s), please update!"
                % (cert_path, expire_in_days, cert.not_valid_after)
            )
        )
    else:
        print(
            "OK - certificate %s will expire in %s days (%s)"
            % (cert_path, expire_in_days, cert.not_valid_after)
        )


def get_bundled_cert_paths():
    # type: () -> List[str]
    """
    Return full absolute paths for all the bundled certs.
    """
    cwd = os.path.abspath(os.getcwd())

    result = []
    for file_name in glob.glob("certs/*"):
        file_path = os.path.join(cwd, file_name)
        result.append(file_path)

    return result


def main():
    cert_paths = get_bundled_cert_paths()

    for cert_path in cert_paths:
        fail_if_cert_expires_in_timedelta(
            cert_path, expire_in_threshold_timedelta=DEFAULT_EXPIRE_THRESHOLD_TIMEDELTA
        )


if __name__ == "__main__":
    main()
apache-2.0
Python
1be539f68019435b2d09b1a46e4786a09e59edf2
Allow for multiple SEPA payment methods with different versions (#493) (#496)
CompassionCH/bank-payment
account_banking_pain_base/models/account_payment_method.py
account_banking_pain_base/models/account_payment_method.py
# -*- coding: utf-8 -*-
# © 2016 Akretion (Alexis de Lattre <[email protected]>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

from odoo import models, fields, api, _
from odoo.exceptions import UserError


class AccountPaymentMethod(models.Model):
    _inherit = 'account.payment.method'

    pain_version = fields.Selection([], string='PAIN Version')
    convert_to_ascii = fields.Boolean(
        string='Convert to ASCII', default=True,
        help="If active, Odoo will convert each accented character to "
             "the corresponding unaccented character, so that only ASCII "
             "characters are used in the generated PAIN file.")

    @api.multi
    def get_xsd_file_path(self):
        """This method is designed to be inherited in the SEPA modules"""
        self.ensure_one()
        raise UserError(_(
            "No XSD file path found for payment method '%s'") % self.name)

    _sql_constraints = [(
        # Extending this constraint from account_payment_mode
        'code_payment_type_unique',
        'unique(code, payment_type, pain_version)',
        'A payment method of the same type already exists with this code'
        ' and PAIN version'
    )]
# -*- coding: utf-8 -*-
# © 2016 Akretion (Alexis de Lattre <[email protected]>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

from odoo import models, fields, api, _
from odoo.exceptions import UserError


class AccountPaymentMethod(models.Model):
    _inherit = 'account.payment.method'

    pain_version = fields.Selection([], string='PAIN Version')
    convert_to_ascii = fields.Boolean(
        string='Convert to ASCII', default=True,
        help="If active, Odoo will convert each accented character to "
             "the corresponding unaccented character, so that only ASCII "
             "characters are used in the generated PAIN file.")

    @api.multi
    def get_xsd_file_path(self):
        """This method is designed to be inherited in the SEPA modules"""
        self.ensure_one()
        raise UserError(_(
            "No XSD file path found for payment method '%s'") % self.name)
agpl-3.0
Python
4697bb9bb7a3708f1c35b795c02db329d3142703
Add script to collect metrics in samples of a case into a single vector
jesusbriales/rgbd_benchmark_tools
src/rgbd_benchmark_tools/h5_collectSamples.py
src/rgbd_benchmark_tools/h5_collectSamples.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 09:02:31 2015

@author: jesus
"""

import argparse
import numpy as np
import h5py

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='''
    This script collects the metrics and results from several samples
    of an experiment into its parent group.
    ''')
    parser.add_argument('h5file',
                        help='HDF5 file in which the metrics are stored in the group eval for each sample')
    parser.add_argument('group',
                        help='H5 path of the main group containing sample minor groups')
    parser.add_argument('delta_unit',
                        help='delta_unit of the metrics to collect')
    args = parser.parse_args()

    h5f = h5py.File(args.h5file,'a')
    unit = args.delta_unit

    # Save the evaluation metric values in the samples' parent group
    main_group = h5f[args.group]

    # Check if eval group already exists in the main group
    if 'eval/'+unit in main_group:
        print "Removing existing eval/"+unit + " group in" + main_group.name
        del main_group['eval/'+unit]

    numOfSamples = len(main_group)

    # Create new eval group in the main group
    samples = main_group.keys()
    samples = [x for x in samples if x != 'eval']
    eval_group = main_group.require_group('eval/'+unit)

    names = ['rmse','median','mean','max']
    for name in names:
        # Preallocate arrays
        t_arr = np.empty(numOfSamples)
        r_arr = np.empty(numOfSamples)
        # Store metrics in sample in an array
        for i, sample in enumerate(samples):
            t_arr[i] = main_group[sample+'/eval/'+unit+'/t_'+name][()]
            r_arr[i] = main_group[sample+'/eval/'+unit+'/r_'+name][()]

        # Check if dataset already exists in the group
        if 't_'+name in eval_group:
            print "Removing existing trans dataset in " + eval_group.name
            del eval_group['t_'+name]
        if 'r_'+name in eval_group:
            print "Removing existing rot dataset in " + eval_group.name
            del eval_group['r_'+name]

        # Save as a new dataset in the main group
        eval_group.create_dataset('t_'+name, data=t_arr)
        eval_group.create_dataset('r_'+name, data=r_arr)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 09:02:31 2015

@author: jesus
"""

import argparse
import numpy as np
import h5py

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='''
    This script collects the metrics and results from several samples
    of an experiment into its parent group.
    ''')
    parser.add_argument('h5file',
                        help='HDF5 file in which the metrics are stored in the group eval for each sample')
    parser.add_argument('group',
                        help='H5 path of the main group containing sample minor groups')
    parser.add_argument('delta_unit',
                        help='delta_unit of the metrics to collect')
    args = parser.parse_args()

    h5f = h5py.File(args.h5file,'a')
    unit = args.delta_unit

    # Save the evaluation metric values in the samples' parent group
    main_group = h5f[args.group]

    # Check if eval group already exists in the main group
    if 'eval' in main_group:
        print "Removing existing eval group in" + main_group.name
        del main_group['eval']

    numOfSamples = len(main_group)

    # Create new eval group in the main group
    samples = main_group.keys()
    eval_group = main_group.require_group('eval/'+args.delta_unit)

    names = ['rmse','median','mean','max']
    for name in names:
        # Preallocate arrays
        t_arr = np.empty(numOfSamples)
        r_arr = np.empty(numOfSamples)
        # Store metrics in sample in an array
        for i, sample in enumerate(samples):
            t_arr[i] = main_group[sample+'/eval/'+unit+'/t_'+name][()]
            r_arr[i] = main_group[sample+'/eval/'+unit+'/r_'+name][()]

        # Check if dataset already exists in the group
        if 't_'+name in eval_group:
            print "Removing existing trans dataset in " + eval_group.name
            del eval_group['t_'+name]
        if 'r_'+name in eval_group:
            print "Removing existing rot dataset in " + eval_group.name
            del eval_group['r_'+name]

        # Save as a new dataset in the main group
        eval_group.create_dataset('t_'+name, data=t_arr)
        eval_group.create_dataset('r_'+name, data=r_arr)
bsd-2-clause
Python
0f295d0ee8c29361bd4f80dbc947da65dd7fbbe6
move raindrops
neiesc/Problem-solving,neiesc/Problem-solving,neiesc/Problem-solving,neiesc/Problem-solving
Exercism/python/raindrops/raindrops.py
Exercism/python/raindrops/raindrops.py
def convert(number):
    raindrops = ((3, "Pling"), (5, "Plang"), (7, "Plong"))
    raindrop_result = [raindrop[1] for raindrop in raindrops if number % raindrop[0] == 0]
    return "".join(raindrop_result) or str(number)
raindrops = ((3, "Pling"), (5, "Plang"), (7, "Plong"))


def convert(number):
    raindrop_result = [raindrop[1] for raindrop in raindrops if number % raindrop[0] == 0]
    return "".join(raindrop_result) or str(number)
mit
Python
15ae458f7cf1a8257967b2b3b0ceb812547c4766
Test more edge cases of the highlighting parser
ipython/ipython,ipython/ipython
IPython/utils/tests/test_pycolorize.py
IPython/utils/tests/test_pycolorize.py
# coding: utf-8
"""Test suite for our color utilities.

Authors
-------

* Min RK
"""

#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

# third party
import nose.tools as nt

# our own
from IPython.utils.PyColorize import Parser

import io

#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------

sample = u"""
def function(arg, *args, kwarg=True, **kwargs):
    '''
    this is docs
    '''
    pass is True
    False == None

    with io.open(ru'unicode'):
        raise ValueError("\n escape \r sequence")

    print("wěird ünicoðe")

class Bar(Super):
    def __init__(self):
        super(Bar, self).__init__(1**2, 3^4, 5 or 6)
"""


def test_loop_colors():

    for scheme in ('Linux', 'NoColor','LightBG'):

        def test_unicode_colorize():
            p = Parser()
            f1 = p.format('1/0', 'str', scheme=scheme)
            f2 = p.format(u'1/0', 'str', scheme=scheme)
            nt.assert_equal(f1, f2)

        def test_parse_sample():
            """and test writing to a buffer"""
            buf = io.StringIO()
            p = Parser()
            p.format(sample, buf, scheme=scheme)
            buf.seek(0)
            f1 = buf.read()

            nt.assert_not_in('ERROR', f1)

        def test_parse_error():
            p = Parser()
            f1 = p.format(')', 'str', scheme=scheme)
            if scheme != 'NoColor':
                nt.assert_in('ERROR', f1)

        yield test_unicode_colorize
        yield test_parse_sample
        yield test_parse_error
"""Test suite for our color utilities. Authors ------- * Min RK """ #----------------------------------------------------------------------------- # Copyright (C) 2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING.txt, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # third party import nose.tools as nt # our own from IPython.utils.PyColorize import Parser #----------------------------------------------------------------------------- # Test functions #----------------------------------------------------------------------------- def test_unicode_colorize(): p = Parser() f1 = p.format('1/0', 'str') f2 = p.format(u'1/0', 'str') nt.assert_equal(f1, f2)
bsd-3-clause
Python
1eee9dfa6f7ea359f0dc4d0bf7450b3c96d3731d
Remove unnecessary var
reunition/reunition,reunition/reunition,reunition/reunition
reunition/apps/reunions/management/commands/setalumniusersfromrsvps.py
reunition/apps/reunions/management/commands/setalumniusersfromrsvps.py
from django.core.management.base import NoArgsCommand
from django.db.models.fields import related

from reunition.apps.alumni import models as alumni_m
from reunition.apps.reunions import models as reunions_m


class Command(NoArgsCommand):
    help = 'Associate reunions.Rsvp.created_by to alumni.Person.user when not yet set'

    def handle_noargs(self, **options):
        for rsvp in reunions_m.Rsvp.objects.all():
            user = rsvp.created_by
            try:
                user.person
            except alumni_m.Person.DoesNotExist:
                first_alumni_added = rsvp.rsvpalumniattendee_set.order_by('created').first()
                if first_alumni_added:
                    person = first_alumni_added.person
                    print 'Associating user', user, 'with person', person
                    person.user = user
                    person.save()
from django.core.management.base import NoArgsCommand
from django.db.models.fields import related

from reunition.apps.alumni import models as alumni_m
from reunition.apps.reunions import models as reunions_m


class Command(NoArgsCommand):
    help = 'Associate reunions.Rsvp.created_by to alumni.Person.user when not yet set'

    def handle_noargs(self, **options):
        for rsvp in reunions_m.Rsvp.objects.all():
            user = rsvp.created_by
            try:
                user.person
            except alumni_m.Person.DoesNotExist, e:
                first_alumni_added = rsvp.rsvpalumniattendee_set.order_by('created').first()
                if first_alumni_added:
                    person = first_alumni_added.person
                    print 'Associating user', user, 'with person', person
                    person.user = user
                    person.save()
mit
Python
5fc54a2120fbc9151073c9b247e3fd7e8e79a9fa
Remove premature attribute from migration script (Fixes #283)
phihag/adhocracy,SysTheron/adhocracy,alkadis/vcv,liqd/adhocracy,alkadis/vcv,DanielNeugebauer/adhocracy,DanielNeugebauer/adhocracy,SysTheron/adhocracy,alkadis/vcv,liqd/adhocracy,DanielNeugebauer/adhocracy,SysTheron/adhocracy,liqd/adhocracy,phihag/adhocracy,DanielNeugebauer/adhocracy,phihag/adhocracy,DanielNeugebauer/adhocracy,alkadis/vcv,alkadis/vcv,phihag/adhocracy,phihag/adhocracy,liqd/adhocracy
src/adhocracy/migration/versions/054_add_hierachical_categorybadges.py
src/adhocracy/migration/versions/054_add_hierachical_categorybadges.py
from datetime import datetime

from sqlalchemy import Column, ForeignKey, MetaData, Table
from sqlalchemy import Boolean, Integer, DateTime, String, Unicode, LargeBinary

metadata = MetaData()

#table to update
badge_table = Table(
    'badge', metadata,
    #common attributes
    Column('id', Integer, primary_key=True),
    Column('type', String(40), nullable=False),
    Column('create_time', DateTime, default=datetime.utcnow),
    Column('title', Unicode(40), nullable=False),
    Column('color', Unicode(7), nullable=False),
    Column('description', Unicode(255), default=u'', nullable=False),
    Column('instance_id', Integer,
           ForeignKey('instance.id', ondelete="CASCADE",),
           nullable=True),
    # attributes for UserBadges
    Column('group_id', Integer, ForeignKey('group.id', ondelete="CASCADE")),
    Column('display_group', Boolean, default=False),
    Column('visible', Boolean, default=True),
)


def upgrade(migrate_engine):
    #use sqlalchemy-migrate database connection
    metadata.bind = migrate_engine

    #autoload needed tables
    instance_table = Table('instance', metadata, autoload=True)

    #add hierachical columns to the table
    select_child_desc = Column('select_child_description', Unicode(255),
                               default=u'', nullable=True)
    parent = Column('parent_id', Integer,
                    ForeignKey('badge.id', ondelete="CASCADE"),
                    nullable=True)

    #create/recreate the table
    select_child_desc.create(badge_table)
    select_child_desc.alter(nullable=False)
    parent.create(badge_table)


def downgrade(migrate_engine):
    raise NotImplementedError()
from datetime import datetime

from sqlalchemy import Column, ForeignKey, MetaData, Table
from sqlalchemy import Boolean, Integer, DateTime, String, Unicode, LargeBinary

metadata = MetaData()

#table to update
badge_table = Table(
    'badge', metadata,
    #common attributes
    Column('id', Integer, primary_key=True),
    Column('type', String(40), nullable=False),
    Column('create_time', DateTime, default=datetime.utcnow),
    Column('title', Unicode(40), nullable=False),
    Column('color', Unicode(7), nullable=False),
    Column('description', Unicode(255), default=u'', nullable=False),
    Column('instance_id', Integer,
           ForeignKey('instance.id', ondelete="CASCADE",),
           nullable=True),
    # attributes for UserBadges
    Column('group_id', Integer, ForeignKey('group.id', ondelete="CASCADE")),
    Column('display_group', Boolean, default=False),
    Column('visible', Boolean, default=True),
    # attributes for ThumbnailBadges
    Column('thumbnail', LargeBinary, default=None, nullable=True)
)


def upgrade(migrate_engine):
    #use sqlalchemy-migrate database connection
    metadata.bind = migrate_engine

    #autoload needed tables
    instance_table = Table('instance', metadata, autoload=True)

    #add hierachical columns to the table
    select_child_desc = Column('select_child_description', Unicode(255),
                               default=u'', nullable=True)
    parent = Column('parent_id', Integer,
                    ForeignKey('badge.id', ondelete="CASCADE"),
                    nullable=True)

    #create/recreate the table
    select_child_desc.create(badge_table)
    select_child_desc.alter(nullable=False)
    parent.create(badge_table)


def downgrade(migrate_engine):
    raise NotImplementedError()
agpl-3.0
Python
b06687b1e78645a055a314be4b1af693e2c3be05
remove obsolete arguments
StegSchreck/RatS,StegSchreck/RatS,StegSchreck/RatS
RatS/filmaffinity/filmaffinity_site.py
RatS/filmaffinity/filmaffinity_site.py
import time

from RatS.base.base_site import Site
from selenium.webdriver.common.by import By


class FilmAffinity(Site):
    def __init__(self, args):
        login_form_selector = "//form[@id='login-form']"
        self.LOGIN_USERNAME_SELECTOR = login_form_selector + "//input[@name='username']"
        self.LOGIN_PASSWORD_SELECTOR = login_form_selector + "//input[@name='password']"
        self.LOGIN_BUTTON_SELECTOR = login_form_selector + "//input[@type='submit']"
        super(FilmAffinity, self).__init__(args)
        self.MY_RATINGS_URL = "https://www.filmaffinity.com/en/myvotes.php"

    def _get_login_page_url(self):
        return "https://www.filmaffinity.com/en/login.php"

    def _handle_cookie_notice_if_present(self):
        cookie_notices = self.browser.find_elements(By.ID, "qc-cmp2-container")
        if len(cookie_notices) == 0:
            return
        cookie_notice = cookie_notices[0]
        if cookie_notice is not None:
            # agree
            cookie_accept_button = cookie_notice.find_elements(
                By.CSS_SELECTOR, "div.qc-cmp2-summary-buttons button"
            )
            if cookie_accept_button is not None and len(cookie_accept_button) > 1:
                cookie_accept_button[1].click()
                time.sleep(2)
            # agree all
            cookie_accept_button = cookie_notice.find_elements(
                By.CSS_SELECTOR,
                "div.qc-cmp2-buttons-desktop button",
            )
            if cookie_accept_button is not None and len(cookie_accept_button) > 1:
                cookie_accept_button[1].click()
                time.sleep(2)
import time

from RatS.base.base_site import Site
from selenium.webdriver.common.by import By


class FilmAffinity(Site):
    def __init__(self, args):
        login_form_selector = "//form[@id='login-form']"
        self.LOGIN_USERNAME_SELECTOR = login_form_selector + "//input[@name='username']"
        self.LOGIN_PASSWORD_SELECTOR = login_form_selector + "//input[@name='password']"
        self.LOGIN_BUTTON_SELECTOR = login_form_selector + "//input[@type='submit']"
        super(FilmAffinity, self).__init__(args)
        self.MY_RATINGS_URL = "https://www.filmaffinity.com/en/myvotes.php"

    def _get_login_page_url(self):
        return "https://www.filmaffinity.com/en/login.php"

    def _handle_cookie_notice_if_present(self):
        cookie_notices = self.browser.find_elements(By.ID, "qc-cmp2-container")
        if len(cookie_notices) == 0:
            return
        cookie_notice = cookie_notices[0]
        if cookie_notice is not None:
            # agree
            cookie_accept_button = cookie_notice.find_elements(
                By.CSS_SELECTOR,
                By.CSS_SELECTOR,
                "div.qc-cmp2-summary-buttons button"
            )
            if cookie_accept_button is not None and len(cookie_accept_button) > 1:
                cookie_accept_button[1].click()
                time.sleep(2)
            # agree all
            cookie_accept_button = cookie_notice.find_elements(
                By.CSS_SELECTOR,
                By.CSS_SELECTOR,
                "div.qc-cmp2-buttons-desktop button",
            )
            if cookie_accept_button is not None and len(cookie_accept_button) > 1:
                cookie_accept_button[1].click()
                time.sleep(2)
agpl-3.0
Python
c79c3b7f920f4bcf5fb69cf74b224e6ff37a709b
test triggering travis
Lenijas/test-travisci,Lenijas/test-travisci,Lenijas/test-travisci
fabre_test.py
fabre_test.py
#!/usr/bin/env python
# coding=UTF-8
import pytest
import sys

# content of test_assert1.py
def f():
    return 3

def test_function():
    assert f() == 4

test_function()
#!/usr/bin/env python
# coding=UTF-8
import pytest
import sys

sys.exit(0)
bsd-3-clause
Python
0fb800cd42f1545e8d5e744af1ff81922c930448
Add Google analytics ID
glasslion/zha-beta,glasslion/zha-beta,glasslion/zha-beta,glasslion/zha,glasslion/zha,glasslion/zha
pelicanconf.py
pelicanconf.py
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

from datetime import datetime
import os
import sys

BASE_DIR = os.path.dirname(__file__)

# Clone the official plugin repo to the `official_plugins` dir
# (https://github.com/getpelican/pelican-plugins)
sys.path.append(os.path.join(BASE_DIR, "official_plugins"))

AUTHOR = u'Leonardo Zhou'
SITENAME = u'翼图南'
SITE_DESCRIPTION = u'故九萬里,則風斯在下矣,而後乃今培風;背負青天而莫之夭閼者,而後乃今將圖南'
SITEURL = ''

TIMEZONE = 'Asia/Shanghai'

DEFAULT_LANG = u'zh'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None

# Social widget
SOCIAL = (
    ('twitter', 'https://twitter.com/glasslion'),
    ('envelope', 'mailto:[email protected]'),
    ('github', 'https://github.com/glasslion'),
    ('stack-overflow', 'http://stackoverflow.com/users/1093020/leonardo-z'),
)

GOOGLE_ANALYTICS = "UA-42951023-1"

LOCALE = ('usa', 'en_US.utf8')
DEFAULT_DATE_FORMAT = '%b %d, %Y'

# DIRECT_TEMPLATES = ('index', 'tags', 'categories', 'archives')
# PAGINATED_DIRECT_TEMPLATES = (('blog',))

PLUGINS = ['summary', 'assets', 'neighbors']

# Assets
ASSET_BUNDLES = ()
ASSET_CONFIG = (('sass_bin', 'sass'), )

SUMMARY_MAX_LENGTH = 20

DEFAULT_PAGINATION = 5

# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True

# Static content
STATIC_PATHS = ['images', 'extra/CNAME',]
EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'},}

# Url
ARTICLE_URL = '{slug}/'
ARTICLE_SAVE_AS = '{slug}/index.html'

# Archive
YEAR_ARCHIVE_SAVE_AS = 'archives/{date:%Y}/index.html'
MONTH_ARCHIVE_SAVE_AS = 'archives/{date:%Y}/{date:%m}/index.html'

# Custom theme
THEME = '../pelican-zha'
CURRENT_DATETIME = datetime.now()
QINIU_BUCKET_URL = 'http://wing2south.qiniudn.com'
CDN_URL = SITEURL
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

from datetime import datetime
import os
import sys

BASE_DIR = os.path.dirname(__file__)

# Clone the official plugin repo to the `official_plugins` dir
# (https://github.com/getpelican/pelican-plugins)
sys.path.append(os.path.join(BASE_DIR, "official_plugins"))

AUTHOR = u'Leonardo Zhou'
SITENAME = u'翼图南'
SITE_DESCRIPTION = u'故九萬里,則風斯在下矣,而後乃今培風;背負青天而莫之夭閼者,而後乃今將圖南'
SITEURL = ''

TIMEZONE = 'Asia/Shanghai'

DEFAULT_LANG = u'zh'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None

# Social widget
SOCIAL = (
    ('twitter', 'https://twitter.com/glasslion'),
    ('envelope', 'mailto:[email protected]'),
    ('github', 'https://github.com/glasslion'),
    ('stack-overflow', 'http://stackoverflow.com/users/1093020/leonardo-z'),
)

LOCALE = ('usa', 'en_US.utf8')
DEFAULT_DATE_FORMAT = '%b %d, %Y'

# DIRECT_TEMPLATES = ('index', 'tags', 'categories', 'archives')
# PAGINATED_DIRECT_TEMPLATES = (('blog',))

PLUGINS = ['summary', 'assets', 'neighbors']

# Assets
ASSET_BUNDLES = ()
ASSET_CONFIG = (('sass_bin', 'sass'), )

SUMMARY_MAX_LENGTH = 20

DEFAULT_PAGINATION = 5

# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True

# Static content
STATIC_PATHS = ['images', 'extra/CNAME',]
EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'},}

# Url
ARTICLE_URL = '{slug}/'
ARTICLE_SAVE_AS = '{slug}/index.html'

# Archive
YEAR_ARCHIVE_SAVE_AS = 'archives/{date:%Y}/index.html'
MONTH_ARCHIVE_SAVE_AS = 'archives/{date:%Y}/{date:%m}/index.html'

# Custom theme
THEME = '../pelican-zha'
CURRENT_DATETIME = datetime.now()
QINIU_BUCKET_URL = 'http://wing2south.qiniudn.com'
CDN_URL = SITEURL
cc0-1.0
Python
6a1176d547694b535bc581d5a0af87230d533caf
set to version 3.305.533
UmSenhorQualquer/pythonVideoAnnotator
base/pythonvideoannotator/pythonvideoannotator/__init__.py
base/pythonvideoannotator/pythonvideoannotator/__init__.py
# !/usr/bin/python3
# -*- coding: utf-8 -*-

__version__ = "3.305.533"

__author__ = ["Ricardo Ribeiro", "Carlos Mao de Ferro", "Hugo Cachitas"]
__credits__ = ["Ricardo Ribeiro", "Carlos Mao de Ferro", "Hugo Cachitas"]
__license__ = "Attribution-NonCommercial-ShareAlike 4.0 International"
__maintainer__ = ["Ricardo Ribeiro", "Carlos Mao de Ferro"]
__email__ = ["ricardojvr at gmail.com", "cajomferro at gmail.com"]
__status__ = "Development"

from confapp import conf; conf += 'pythonvideoannotator.settings'

import logging

logger = logging.getLogger(__name__)
logger.setLevel(conf.APP_LOG_HANDLER_LEVEL)

if conf.APP_LOG_HANDLER_FILE:
    logger = logging.getLogger()
    loggers_formatter = logging.Formatter(conf.PYFORMS_LOG_FORMAT)
    fh = logging.FileHandler(conf.APP_LOG_HANDLER_FILE)
    fh.setLevel(conf.APP_LOG_HANDLER_FILE_LEVEL)
    fh.setFormatter(loggers_formatter)
    logger.addHandler(fh)
# !/usr/bin/python3
# -*- coding: utf-8 -*-

__version__ = "3.305.532"

__author__ = ["Ricardo Ribeiro", "Carlos Mao de Ferro", "Hugo Cachitas"]
__credits__ = ["Ricardo Ribeiro", "Carlos Mao de Ferro", "Hugo Cachitas"]
__license__ = "Attribution-NonCommercial-ShareAlike 4.0 International"
__maintainer__ = ["Ricardo Ribeiro", "Carlos Mao de Ferro"]
__email__ = ["ricardojvr at gmail.com", "cajomferro at gmail.com"]
__status__ = "Development"

from confapp import conf; conf += 'pythonvideoannotator.settings'

import logging

logger = logging.getLogger(__name__)
logger.setLevel(conf.APP_LOG_HANDLER_LEVEL)

if conf.APP_LOG_HANDLER_FILE:
    logger = logging.getLogger()
    loggers_formatter = logging.Formatter(conf.PYFORMS_LOG_FORMAT)
    fh = logging.FileHandler(conf.APP_LOG_HANDLER_FILE)
    fh.setLevel(conf.APP_LOG_HANDLER_FILE_LEVEL)
    fh.setFormatter(loggers_formatter)
    logger.addHandler(fh)
mit
Python
d82c37a85e3522f7cf7e26a220eb5946aec66ffe
Create docs from numpy
datamicroscopes/lda,datamicroscopes/lda,datamicroscopes/lda
test/test_data_utils.py
test/test_data_utils.py
import numpy as np

from cStringIO import StringIO
from nose.tools import raises

from microscopes.lda import utils


def test_docs_from_document_term_matrix():
    dtm = [[2, 1], [3, 2]]
    docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
    assert utils.docs_from_document_term_matrix(dtm) == docs


def test_docs_from_numpy_dtp():
    dtm = np.array([[2, 1], [3, 2]])
    docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
    assert utils.docs_from_document_term_matrix(dtm) == docs


def test_docs_from_ldac_simple():
    stream = StringIO()
    stream.write("2 0:2 1:1\n2 0:3 1:2")
    stream.seek(0)  # rewind stream
    docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
    assert utils.docs_from_ldac(stream) == docs

    stream = StringIO()
    stream.write("2 1:1 0:2\n3 2:1 0:3 1:1")
    stream.seek(0)  # rewind stream
    docs = [[1, 0, 0], [2, 0, 0, 0, 1]]
    assert utils.docs_from_ldac(stream) == docs


@raises(AssertionError)
def test_bad_ldac_data():
    stream = StringIO()
    stream.write("2 0:1")
    stream.seek(0)  # rewind stream
    utils.docs_from_ldac(stream)
from cStringIO import StringIO
from nose.tools import raises

from microscopes.lda import utils


def test_docs_from_document_term_matrix():
    dtm = [[2, 1], [3, 2]]
    docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
    assert utils.docs_from_document_term_matrix(dtm) == docs


def test_docs_from_ldac_simple():
    stream = StringIO()
    stream.write("2 0:2 1:1\n2 0:3 1:2")
    stream.seek(0)  # rewind stream
    docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
    assert utils.docs_from_ldac(stream) == docs

    stream = StringIO()
    stream.write("2 1:1 0:2\n3 2:1 0:3 1:1")
    stream.seek(0)  # rewind stream
    docs = [[1, 0, 0], [2, 0, 0, 0, 1]]
    assert utils.docs_from_ldac(stream) == docs


@raises(AssertionError)
def test_bad_ldac_data():
    stream = StringIO()
    stream.write("2 0:1")
    stream.seek(0)  # rewind stream
    utils.docs_from_ldac(stream)
bsd-3-clause
Python
36ed44e94916d6abe3458645c957dd9715cbc532
set STATIC_ROOT
joaoleveiga/django-wsgi-example,joaoleveiga/django-wsgi-example
myproj/myproj/settings.py
myproj/myproj/settings.py
""" Django settings for myproj project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'unsecret_key' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'myapp', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'myproj.urls' WSGI_APPLICATION = 'myproj.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'web', 'static') TEMPLATE_DIRS = ( os.path.join(BASE_DIR, 'myproj', 'templates'), ) try: from local_settings import * except ImportError: pass
""" Django settings for myproj project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'unsecret_key' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'myapp', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'myproj.urls' WSGI_APPLICATION = 'myproj.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' TEMPLATE_DIRS = ( os.path.join(BASE_DIR, 'myproj', 'templates'), ) try: from local_settings import * except ImportError: pass
mit
Python
566ceb81a14685c201f3c92668dc0530a1a91176
fix path
Ircam-Web/mezzanine-organization,Ircam-Web/mezzanine-organization
organization/projects/management/commands/project_inject_content.py
organization/projects/management/commands/project_inject_content.py
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki

# This file is part of mezzanine-organization.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import requests
import json
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from organization.projects.models import *
from django.utils.text import slugify
from django.contrib.sites.models import Site
from copy import deepcopy


class Command(BaseCommand):
    help = """Retrieve content_fr of old mode Project from database
        Tue Feb 5 14:26:55 2019 +0100
    """

    def handle(self, *args, **options):
        json_path = '/srv/lib/mezzanine-organization/organization/projects/management/commands/projects.json'
        old_projects = self.read_json(json_path)
        project_pages = ProjectPage.objects.all()
        for project_page in project_pages:
            print(project_page.site_id)
            for old_project in old_projects:
                if old_project['pk'] == project_page.project_id:
                    # inject _fr in _en (because _fr became _en)
                    if not project_page.content_en:
                        project_page.content_en = project_page.content_fr
                    project_page.content_fr = old_project['fields']['content_fr']
                    project_page.save()

    def read_file(self, path):
        file = open(path, "r")
        data = file.read()
        file.close()
        return data

    def read_json(self, path):
        return json.loads(self.read_file(path))
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki

# This file is part of mezzanine-organization.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import requests
import json
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from organization.projects.models import *
from django.utils.text import slugify
from django.contrib.sites.models import Site
from copy import deepcopy


class Command(BaseCommand):
    help = """Retrieve content_fr of old mode Project from database
        Tue Feb 5 14:26:55 2019 +0100
    """

    def handle(self, *args, **options):
        old_projects = self.read_json('projects.json')
        project_pages = ProjectPage.objects.all()
        for project_page in project_pages:
            print(project_page.site_id)
            for old_project in old_projects:
                if old_project['pk'] == project_page.project_id:
                    # inject _fr in _en (because _fr became _en)
                    if not project_page.content_en:
                        project_page.content_en = project_page.content_fr
                    project_page.content_fr = old_project['fields']['content_fr']
                    project_page.save()

    def read_file(self, path):
        file = open(path, "r")
        data = file.read()
        file.close()
        return data

    def read_json(self, path):
        return json.loads(self.read_file(path))
agpl-3.0
Python
2e4ec0fea35722fbdbab36ce326e664249e3eaf7
Add support jinja2
beni55/nacho,beni55/nacho,avelino/nacho
nacho/controllers/base.py
nacho/controllers/base.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tornado.web import RequestHandler
from jinja2 import Environment, FileSystemLoader, TemplateNotFound


class ApplicationController(RequestHandler):

    def render(self, template_name, **kwargs):
        kwargs.update({
            'settings': self.settings,
            'STATIC_URL': self.settings.get('static_url_prefix', '/static/'),
            'request': self.request,
            'xsrf_token': self.xsrf_token,
            'xsrf_form_html': self.xsrf_form_html,
        })
        self.write(self.render_template(template_name, **kwargs))

    def render_template(self, template_name, **kwargs):
        template_dirs = []
        if self.settings.get('template_path', ''):
            template_dirs.append(self.settings["template_path"])

        env = Environment(loader=FileSystemLoader(template_dirs))

        try:
            template = env.get_template(template_name)
        except TemplateNotFound:
            raise TemplateNotFound(template_name)
        return template.render(kwargs)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from cyclone.web import RequestHandler


class ApplicationController(RequestHandler):
    pass
mit
Python
45c86ade944d9afe7bc8e627e25fa861489cd4b6
fix a typo so that email is sent to the correct host
crateio/crate.web,crateio/crate.web
crate_project/settings/production/gondor.py
crate_project/settings/production/gondor.py
import os

from .base import *

from local_settings import *  # Instance specific settings (in deploy.settings_[INSTANCE_NAME]))

# Fix Email Settings
SERVER_EMAIL = "[email protected]"
DEFAULT_FROM_EMAIL = "[email protected]"

CACHES = {
    "default": {
        "BACKEND": "redis_cache.RedisCache",
        "LOCATION": ":".join([GONDOR_REDIS_HOST, str(GONDOR_REDIS_PORT)]),
        "KEY_PREFIX": "cache",
        "OPTIONS": {
            "DB": 0,
            "PASSWORD": GONDOR_REDIS_PASSWORD,
        }
    }
}

PYPI_DATASTORE_CONFIG = {
    "host": GONDOR_REDIS_HOST,
    "port": GONDOR_REDIS_PORT,
    "password": GONDOR_REDIS_PASSWORD,
}

LOCK_DATASTORE_CONFIG = PYPI_DATASTORE_CONFIG

# Configure Celery
BROKER_TRANSPORT = "redis"
BROKER_HOST = GONDOR_REDIS_HOST
BROKER_PORT = GONDOR_REDIS_PORT
BROKER_VHOST = "0"
BROKER_PASSWORD = GONDOR_REDIS_PASSWORD
BROKER_POOL_LIMIT = 10

CELERY_RESULT_BACKEND = "redis"
CELERY_REDIS_HOST = GONDOR_REDIS_HOST
CELERY_REDIS_PORT = GONDOR_REDIS_PORT
CELERY_REDIS_PASSWORD = GONDOR_REDIS_PASSWORD

SECRET_KEY = os.environ["SECRET_KEY"]

EMAIL_HOST = os.environ["EMAIL_HOST"]
EMAIL_PORT = int(os.environ["EMAIL_PORT"])
EMAIL_HOST_USER = os.environ["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = os.environ["EMAIL_HOST_PASSWORD"]
EMAIL_USE_TLS = True

AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"]
AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"]

HAYSTACK_CONNECTIONS = {
    "default": {
        "ENGINE": os.environ["HAYSTACK_DEFAULT_ENGINE"],
        "URL": os.environ["HAYSTACK_DEFAULT_URL"],
        "INDEX_NAME": os.environ["HAYSTACK_DEFAULT_INDEX_NAME"],
    },
}

INTERCOM_USER_HASH_KEY = os.environ["INTERCOM_USER_HASH_KEY"]
import os

from .base import *

from local_settings import *  # Instance specific settings (in deploy.settings_[INSTANCE_NAME]))

# Fix Email Settings
SERVER_EMAIL = "[email protected]"
DEFAULT_FROM_EMAIL = "[email protected]"

CACHES = {
    "default": {
        "BACKEND": "redis_cache.RedisCache",
        "LOCATION": ":".join([GONDOR_REDIS_HOST, str(GONDOR_REDIS_PORT)]),
        "KEY_PREFIX": "cache",
        "OPTIONS": {
            "DB": 0,
            "PASSWORD": GONDOR_REDIS_PASSWORD,
        }
    }
}

PYPI_DATASTORE_CONFIG = {
    "host": GONDOR_REDIS_HOST,
    "port": GONDOR_REDIS_PORT,
    "password": GONDOR_REDIS_PASSWORD,
}

LOCK_DATASTORE_CONFIG = PYPI_DATASTORE_CONFIG

# Configure Celery
BROKER_TRANSPORT = "redis"
BROKER_HOST = GONDOR_REDIS_HOST
BROKER_PORT = GONDOR_REDIS_PORT
BROKER_VHOST = "0"
BROKER_PASSWORD = GONDOR_REDIS_PASSWORD
BROKER_POOL_LIMIT = 10

CELERY_RESULT_BACKEND = "redis"
CELERY_REDIS_HOST = GONDOR_REDIS_HOST
CELERY_REDIS_PORT = GONDOR_REDIS_PORT
CELERY_REDIS_PASSWORD = GONDOR_REDIS_PASSWORD

SECRET_KEY = os.environ["SECRET_KEY"]

EMAIL_HOST = os.environ["EMAIL_HOST_PASSWORD"]
EMAIL_PORT = int(os.environ["EMAIL_PORT"])
EMAIL_HOST_USER = os.environ["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = os.environ["EMAIL_HOST_PASSWORD"]
EMAIL_USE_TLS = True

AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"]
AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"]

HAYSTACK_CONNECTIONS = {
    "default": {
        "ENGINE": os.environ["HAYSTACK_DEFAULT_ENGINE"],
        "URL": os.environ["HAYSTACK_DEFAULT_URL"],
        "INDEX_NAME": os.environ["HAYSTACK_DEFAULT_INDEX_NAME"],
    },
}

INTERCOM_USER_HASH_KEY = os.environ["INTERCOM_USER_HASH_KEY"]
bsd-2-clause
Python
2a1407b34187cfba6c968a7b95e58ec1c115a8f6
Print functions
datacommonsorg/api-python,datacommonsorg/api-python
datacommons/examples/population_analysis.py
datacommons/examples/population_analysis.py
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Example analysis with DataCommons Python API.
"""

import pandas as pd
import datacommons


def main():
  dc = datacommons.Client()

  # Build a table with a single US state
  state_table = dc.get_states('United States', 'state', max_rows=1)

  # Add the state name and the 5 counties contained in that state
  state_table = dc.expand(
      state_table, 'name', 'state', 'state_name', outgoing=True)
  state_table = dc.expand(
      state_table,
      'containedInPlace',
      'state',
      'county',
      outgoing=False,
      max_rows=3)
  state_table = dc.expand(
      state_table, 'name', 'county', 'county_name', outgoing=True)

  state_table = dc.get_populations(
      state_table,
      seed_col_name='county',
      new_col_name='county_population',
      population_type='Person',
      max_rows=100)
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print(state_table)

  state_table = dc.get_populations(
      state_table,
      seed_col_name='county',
      new_col_name='county_18_24_years_population',
      population_type='Person',
      max_rows=100,
      age='USC/18To24Years')
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print(state_table)

  state_table = dc.get_populations(
      state_table,
      seed_col_name='county',
      new_col_name='county_male_population',
      population_type='Person',
      max_rows=100,
      gender='Male')
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print(state_table)

  state_table = dc.get_observations(
      state_table,
      seed_col_name='county_population',
      new_col_name='county_person_count',
      start_date='2012-01-01',
      end_date='2016-01-01',
      measured_property='count',
      stats_type='count')
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print(state_table)


if __name__ == '__main__':
  main()
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Example analysis with DataCommons Python API.
"""

import pandas as pd
import datacommons


def main():
  dc = datacommons.Client()

  # Build a table with a single US state
  state_table = dc.get_states('United States', 'state', max_rows=1)

  # Add the state name and the 5 counties contained in that state
  state_table = dc.expand(
      state_table, 'name', 'state', 'state_name', outgoing=True)
  state_table = dc.expand(
      state_table,
      'containedInPlace',
      'state',
      'county',
      outgoing=False,
      max_rows=2)
  state_table = dc.expand(
      state_table, 'name', 'county', 'county_name', outgoing=True)

  state_table = dc.get_populations(
      state_table,
      seed_col_name='county',
      new_col_name='county_population',
      population_type='Person',
      max_rows=100)
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print state_table

  state_table = dc.get_populations(
      state_table,
      seed_col_name='county',
      new_col_name='county_18_24_years_population',
      population_type='Person',
      max_rows=100,
      age='USC/18To24Years')
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print state_table

  state_table = dc.get_populations(
      state_table,
      seed_col_name='county',
      new_col_name='county_male_population',
      population_type='Person',
      max_rows=100,
      gender='Male')
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print state_table

  state_table = dc.get_observations(
      state_table,
      seed_col_name='county_population',
      new_col_name='county_person_count',
      start_date='2012-01-01',
      end_date='2016-01-01',
      measured_property='count',
      stats_type='count')
  with pd.option_context('display.width', 400, 'display.max_rows', 100):
    print state_table


if __name__ == '__main__':
  main()
apache-2.0
Python
d74b15485a0756ac1702fafd640f616f022b3f58
bump versions
rygwdn/equals,toddsifleet/equals
equals/__init__.py
equals/__init__.py
from __future__ import absolute_import
__version__ = '0.0.21'

import numbers
import collections

from equals.equals import Equals as instance_of
from equals.constraints.anything_true import AnythingTrue
from equals.constraints.anything_false import AnythingFalse

anything = instance_of()
try:
    any_string = instance_of(basestring)
except NameError:
    any_string = instance_of(str)
any_number = instance_of(numbers.Number)
any_int = instance_of(int)
any_float = instance_of(float)
any_iterable = instance_of(collections.Iterable)
any_dict = instance_of(dict)
any_list = instance_of(list)
any_tuple = instance_of(tuple)

anything_false = AnythingFalse(anything)
anything_true = AnythingTrue(anything)
from __future__ import absolute_import
__version__ = '0.0.2'

import numbers
import collections

from equals.equals import Equals as instance_of
from equals.constraints.anything_true import AnythingTrue
from equals.constraints.anything_false import AnythingFalse

anything = instance_of()
try:
    any_string = instance_of(basestring)
except NameError:
    any_string = instance_of(str)
any_number = instance_of(numbers.Number)
any_int = instance_of(int)
any_float = instance_of(float)
any_iterable = instance_of(collections.Iterable)
any_dict = instance_of(dict)
any_list = instance_of(list)
any_tuple = instance_of(tuple)

anything_false = AnythingFalse(anything)
anything_true = AnythingTrue(anything)
mit
Python
9c2d1e9e841014dbc986b6e509b19f7f881969c4
Fix silly typo
spendb/spendb,openspending/spendb,spendb/spendb,pudo/spendb,pudo/spendb,USStateDept/FPA_Core,CivicVision/datahub,johnjohndoe/spendb,spendb/spendb,CivicVision/datahub,CivicVision/datahub,johnjohndoe/spendb,openspending/spendb,USStateDept/FPA_Core,pudo/spendb,nathanhilbert/FPA_Core,nathanhilbert/FPA_Core,johnjohndoe/spendb,USStateDept/FPA_Core,nathanhilbert/FPA_Core,openspending/spendb
openspending/lib/csvexport.py
openspending/lib/csvexport.py
import csv
import sys

from datetime import datetime

from openspending import model
from openspending.mongo import DBRef, ObjectId


def write_csv(entries, response):
    response.content_type = 'text/csv'

    # NOTE: this should be a streaming service but currently
    # I see no way to know the full set of keys without going
    # through the data twice.
    keys = set()
    rows = []

    for entry in entries:
        d = {}
        for k, v in model.entry.to_query_dict(entry).items():
            if isinstance(v, (list, tuple, dict, DBRef)):
                continue
            elif isinstance(v, ObjectId):
                v = str(v)
            elif isinstance(v, datetime):
                v = v.isoformat()
            d[unicode(k).encode('utf8')] = unicode(v).encode('utf8')
        keys.update(d.keys())
        rows.append(d)

    fields = sorted(keys)
    writer = csv.DictWriter(response, fields)

    if sys.version_info < (2,7):
        header = dict(zip(fields, fields))
        writer.writerow(header)
    else:
        writer.writeheader()

    writer.writerows(rows)
import csv
import sys

from datetime import datetime

from openspending import model
from openspending.mongo import DBRef, ObjectId


def write_csv(entries, response):
    response.content_type = 'text/csv'

    # NOTE: this should be a streaming service but currently
    # I see no way to know the full set of keys without going
    # through the data twice.
    keys = set()
    rows = []

    for entry in entries:
        d = {}
        for k, v in model.entry.to_query_dict(entry).items():
            if isinstance(v, (list, tuple, dict, DBRef)):
                continue
            elif isinstance(v, ObjectId):
                v = str(v)
            elif isinstance(v, datetime):
                v = v.isoformat()
            d[unicode(k).encode('utf8')] = unicode(v).encode('utf8')
        keys.update(d.keys())
        rows.append(d)

    fields = sorted(keys)
    writer = csv.DictWriter(response, fields)

    if sys.version_info < (2,7):
        header = dict(zip(fields, fields))
        self.writerow(header)
    else:
        writer.writeheader()

    writer.writerows(rows)
agpl-3.0
Python
ab35f508375c760770884882acaea79079a1a976
remove unnecessary print
natemara/python-erlang
erlang/__init__.py
erlang/__init__.py
from __future__ import division


def extended_b_lines(usage, blocking):
    '''
    Uses the Extended Erlang B formula to calcluate the ideal number of
    lines for the given usage in erlangs and the given blocking rate.

    Usage: extended_b_lines(usage, blocking)
    '''
    line_count = 1
    while extended_b(usage, line_count) > blocking:
        line_count += 1

    return line_count


def extended_b(usage, lines, recall=0.5):
    '''
    Usage: extended_b(usage, lines, recall=0.5)
    '''
    original_usage = usage

    while True:
        PB = b(usage, lines)
        magic_number_1 = (1 - PB) * usage + (1 - recall) * PB * usage
        magic_number_2 = 0.9999 * original_usage

        if magic_number_1 >= magic_number_2:
            return PB

        usage = original_usage + recall * PB * usage

    return -1


def b(usage, lines):
    '''
    Usage: b(usage, lines)
    '''
    if usage > 0:
        PBR = (1 + usage) / usage
        for index in range(2, lines + 1):
            PBR = index / usage * PBR + 1
            if PBR > 10000:
                return 0

        return 1 / PBR

    return 0
from __future__ import division


def extended_b_lines(usage, blocking):
    '''
    Uses the Extended Erlang B formula to calcluate the ideal number of
    lines for the given usage in erlangs and the given blocking rate.

    Usage: extended_b_lines(usage, blocking)
    '''
    line_count = 1
    while extended_b(usage, line_count) > blocking:
        line_count += 1

    return line_count


def extended_b(usage, lines, recall=0.5):
    '''
    Usage: extended_b(usage, lines, recall=0.5)
    '''
    original_usage = usage

    while True:
        PB = b(usage, lines)
        magic_number_1 = (1 - PB) * usage + (1 - recall) * PB * usage
        magic_number_2 = 0.9999 * original_usage

        if magic_number_1 >= magic_number_2:
            return PB

        usage = original_usage + recall * PB * usage

    return -1


def b(usage, lines):
    '''
    Usage: b(usage, lines)
    '''
    if usage > 0:
        PBR = (1 + usage) / usage
        for index in range(2, lines + 1):
            print(PBR)
            PBR = index / usage * PBR + 1
            if PBR > 10000:
                return 0

        return 1 / PBR

    return 0
mit
Python
c40a07e4ba1bfefd977bc9eea71abe5fcaf97370
Use custom exception in place of NotImplemented
gwhigs/digital-manifesto,gwhigs/digital-manifesto,gwhigs/digital-manifesto,gwhigs/digital-manifesto
manifestos/twitter.py
manifestos/twitter.py
import re

from django.conf import settings
import tweepy

TWITTER_CONSUMER_KEY = settings.TWITTER_CONSUMER_KEY
TWITTER_CONSUMER_SECRET = settings.TWITTER_CONSUMER_SECRET
TWITTER_ACCESS_KEY = settings.TWITTER_ACCESS_KEY
TWITTER_ACCESS_SECRET = settings.TWITTER_ACCESS_SECRET


class TwitterBotException(Exception):
    pass


class TwitterBot(object):
    """
    Creates tweets for the Digital Manifest Twitter Bot.
    """
    def get_auth(self):
        auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
        auth.set_access_token(TWITTER_ACCESS_KEY, TWITTER_ACCESS_SECRET)
        return auth

    def get_api(self):
        auth = self.get_auth()
        return tweepy.API(auth)

    def tweet(self, text):
        # Make sure we have legitimate text to tweet
        if not isinstance(text, str):
            raise TwitterBotException('Can only tweet strings.')
        text = text.strip()
        if not text:
            raise TwitterBotException('Text has no content.')
        # Escape SMS commands
        pattern = re.compile(
            r'^(ON|OFF|FOLLOW|F|UNFOLLOW|LEAVE|L|STOP|QUIT|END|CANCEL|'
            r'UNSBSCRIBE|ARRET|D|M|RETWEET|RT|SET|WHOIS|W|GET|G|FAV|FAVE|'
            r'FAVORITE|FAVORITE|\*|STATS|SUGGEST|SUG|S|WTF|HELP|INFO|AIDE|'
            r'BLOCK|BLK|REPORT|REP)(\W)(.*)', re.I)
        text = re.sub(pattern, '\\1\u200B\\2\\3', text)
        # Truncate to 140 characters
        text = text[:140]
        # Tweet
        api = self.get_api()
        api.update_status(status=text)
import re

from django.conf import settings
import tweepy

TWITTER_CONSUMER_KEY = settings.TWITTER_CONSUMER_KEY
TWITTER_CONSUMER_SECRET = settings.TWITTER_CONSUMER_SECRET
TWITTER_ACCESS_KEY = settings.TWITTER_ACCESS_KEY
TWITTER_ACCESS_SECRET = settings.TWITTER_ACCESS_SECRET


class TwitterBot(object):
    """
    Creates tweets for the Digital Manifest Twitter Bot.
    """
    def get_auth(self):
        auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
        auth.set_access_token(TWITTER_ACCESS_KEY, TWITTER_ACCESS_SECRET)
        return auth

    def get_api(self):
        auth = self.get_auth()
        return tweepy.API(auth)

    def tweet(self, text):
        if not isinstance(text, str):
            raise NotImplemented('Can only tweet strings.')
        # Escape SMS commands
        pattern = re.compile(
            r'^(ON|OFF|FOLLOW|F|UNFOLLOW|LEAVE|L|STOP|QUIT|END|CANCEL|'
            r'UNSBSCRIBE|ARRET|D|M|RETWEET|RT|SET|WHOIS|W|GET|G|FAV|FAVE|'
            r'FAVORITE|FAVORITE|\*|STATS|SUGGEST|SUG|S|WTF|HELP|INFO|AIDE|'
            r'BLOCK|BLK|REPORT|REP)(\W)(.*)', re.I)
        text = re.sub(pattern, '\\1\u200B\\2\\3', text)
        text = text[:140]
        api = self.get_api()
        api.update_status(status=text)
mit
Python
4b4b689463c0e6d0db783a10fcf74b21fea60a68
Fix double repr.
nex3/pygments,Khan/pygments,dbrgn/pygments-mirror,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,nex3/pygments,dbrgn/pygments-mirror,nsfmc/pygments,nsfmc/pygments,nsfmc/pygments,kirbyfan64/pygments-unofficial,Khan/pygments,kirbyfan64/pygments-unofficial,Khan/pygments,nsfmc/pygments,dbrgn/pygments-mirror,nsfmc/pygments,dbrgn/pygments-mirror,dbrgn/pygments-mirror,Khan/pygments,nsfmc/pygments,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,nex3/pygments,kirbyfan64/pygments-unofficial,nsfmc/pygments,nsfmc/pygments,Khan/pygments,kirbyfan64/pygments-unofficial,nsfmc/pygments,dbrgn/pygments-mirror,nex3/pygments,nsfmc/pygments,Khan/pygments,kirbyfan64/pygments-unofficial,nex3/pygments,nsfmc/pygments,kirbyfan64/pygments-unofficial,Khan/pygments,Khan/pygments,nex3/pygments,dbrgn/pygments-mirror,nsfmc/pygments,Khan/pygments,nex3/pygments,kirbyfan64/pygments-unofficial,nsfmc/pygments,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,Khan/pygments,dbrgn/pygments-mirror,nsfmc/pygments,Khan/pygments,nex3/pygments,nex3/pygments,Khan/pygments,Khan/pygments,kirbyfan64/pygments-unofficial,Khan/pygments,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,nsfmc/pygments,nsfmc/pygments,kirbyfan64/pygments-unofficial,Khan/pygments,nex3/pygments,dbrgn/pygments-mirror,dbrgn/pygments-mirror,Khan/pygments,nex3/pygments,kirbyfan64/pygments-unofficial
pygments/formatters/other.py
pygments/formatters/other.py
# -*- coding: utf-8 -*-
"""
    pygments.formatters.other
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Other formatters: NullFormatter, RawTokenFormatter.

    :copyright: 2006 by Georg Brandl, Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

from pygments.formatter import Formatter


__all__ = ['NullFormatter', 'RawTokenFormatter']


class NullFormatter(Formatter):
    """
    Output the text unchanged without any formatting.
    """
    def format(self, tokensource, outfile):
        for ttype, value in tokensource:
            outfile.write(value.encode(self.encoding))


class RawTokenFormatter(Formatter):
    """
    Output a raw token representation for storing token streams.

    The format is ``tokentype<TAB>repr(tokenstring)``

    Additional options accepted:

    ``compress``
        If set to "gz" or "bz2", compress the token stream with the given
        compression algorithm (default: '').
    """
    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.compress = options.get('compress', '')

    def format(self, tokensource, outfile):
        if self.compress == 'gz':
            import gzip
            outfile = gzip.GzipFile('', 'wb', 9, outfile)
            write = outfile.write
            flush = outfile.flush
        elif self.compress == 'bz2':
            import bz2
            compressor = bz2.BZ2Compressor(9)
            def write(text):
                outfile.write(compressor.compress(text))
            def flush():
                outfile.write(compressor.flush())
                outfile.flush()
        else:
            write = outfile.write
            flush = outfile.flush

        lasttype = None
        lastval = u''
        for ttype, value in tokensource:
            value = repr(value)
            if ttype is lasttype:
                lastval += value
            else:
                if lasttype:
                    write("%s\t%s\n" % (lasttype, lastval))
                lastval = value
                lasttype = ttype
        write("%s\t%s\n" % (lasttype, lastval))
        flush()
# -*- coding: utf-8 -*- """ pygments.formatters.other ~~~~~~~~~~~~~~~~~~~~~~~~~ Other formatters: NullFormatter, RawTokenFormatter. :copyright: 2006 by Georg Brandl, Armin Ronacher. :license: BSD, see LICENSE for more details. """ from pygments.formatter import Formatter __all__ = ['NullFormatter', 'RawTokenFormatter'] class NullFormatter(Formatter): """ Output the text unchanged without any formatting. """ def format(self, tokensource, outfile): for ttype, value in tokensource: outfile.write(value.encode(self.encoding)) class RawTokenFormatter(Formatter): """ Output a raw token representation for storing token streams. The format is ``tokentype<TAB>repr(tokenstring)`` Additional options accepted: ``compress`` If set to "gz" or "bz2", compress the token stream with the given compression algorithm (default: ''). """ def __init__(self, **options): Formatter.__init__(self, **options) self.compress = options.get('compress', '') def format(self, tokensource, outfile): if self.compress == 'gz': import gzip outfile = gzip.GzipFile('', 'wb', 9, outfile) write = outfile.write flush = outfile.flush elif self.compress == 'bz2': import bz2 compressor = bz2.BZ2Compressor(9) def write(text): outfile.write(compressor.compress(text)) def flush(): outfile.write(compressor.flush()) outfile.flush() else: write = outfile.write flush = outfile.flush lasttype = None lastval = u'' for ttype, value in tokensource: value = repr(value) if ttype is lasttype: lastval += value else: if lasttype: write("%s\t%r\n" % (lasttype, lastval)) lastval = value lasttype = ttype write("%s\t%r\n" % (lasttype, lastval)) flush()
bsd-2-clause
Python
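A minimal sketch of the RawTokenFormatter fixed above, assuming a standard Pygments install: highlight() drives the formatter, and after this commit each output line carries exactly one repr() of the token string. Recent Pygments releases emit bytes from this formatter, hence the decode guard.

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import RawTokenFormatter

raw = highlight('print("hi")', PythonLexer(), RawTokenFormatter())
if isinstance(raw, bytes):  # newer Pygments returns bytes for this formatter
    raw = raw.decode('utf-8')
for line in raw.splitlines():
    print(line)  # tokentype<TAB>repr(tokenstring), with repr applied once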
2a7aee189dff539fe3cf8049319a2b09c6a0fbb1
add new filter to dataset config
matthias-k/pysaliency,matthias-k/pysaliency
pysaliency/dataset_config.py
pysaliency/dataset_config.py
from .datasets import read_hdf5 from .filter_datasets import ( filter_fixations_by_number, filter_stimuli_by_number, filter_stimuli_by_size, train_split, validation_split, test_split ) from schema import Schema, Optional dataset_config_schema = Schema({ 'stimuli': str, 'fixations': str, Optional('filters', default=[]): [{ 'type': str, Optional('parameters', default={}): dict, }], }) def load_dataset_from_config(config): config = dataset_config_schema.validate(config) stimuli = read_hdf5(config['stimuli']) fixations = read_hdf5(config['fixations']) for filter_config in config['filters']: stimuli, fixations = apply_dataset_filter_config(stimuli, fixations, filter_config) return stimuli, fixations def apply_dataset_filter_config(stimuli, fixations, filter_config): filter_dict = { 'filter_fixations_by_number': add_stimuli_argument(filter_fixations_by_number), 'filter_stimuli_by_number': filter_stimuli_by_number, 'filter_stimuli_by_size': filter_stimuli_by_size, 'train_split': train_split, 'validation_split': validation_split, 'test_split': test_split, } if filter_config['type'] not in filter_dict: raise ValueError("Invalid filter name: {}".format(filter_config['type'])) filter_fn = filter_dict[filter_config['type']] return filter_fn(stimuli, fixations, **filter_config['parameters']) def add_stimuli_argument(fn): def wrapped(stimuli, fixations, **kwargs): new_fixations = fn(fixations, **kwargs) return stimuli, new_fixations return wrapped
from .datasets import read_hdf5 from .filter_datasets import filter_fixations_by_number, filter_stimuli_by_number, train_split, validation_split, test_split from schema import Schema, Optional dataset_config_schema = Schema({ 'stimuli': str, 'fixations': str, Optional('filters', default=[]): [{ 'type': str, Optional('parameters', default={}): dict, }], }) def load_dataset_from_config(config): config = dataset_config_schema.validate(config) stimuli = read_hdf5(config['stimuli']) fixations = read_hdf5(config['fixations']) for filter_config in config['filters']: stimuli, fixations = apply_dataset_filter_config(stimuli, fixations, filter_config) return stimuli, fixations def apply_dataset_filter_config(stimuli, fixations, filter_config): filter_dict = { 'filter_fixations_by_number': add_stimuli_argument(filter_fixations_by_number), 'filter_stimuli_by_number': filter_stimuli_by_number, 'train_split': train_split, 'validation_split': validation_split, 'test_split': test_split, } if filter_config['type'] not in filter_dict: raise ValueError("Invalid filter name: {}".format(filter_config['type'])) filter_fn = filter_dict[filter_config['type']] return filter_fn(stimuli, fixations, **filter_config['parameters']) def add_stimuli_argument(fn): def wrapped(stimuli, fixations, **kwargs): new_fixations = fn(fixations, **kwargs) return stimuli, new_fixations return wrapped
mit
Python
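A hedged sketch of a config dict exercising the newly registered filter_stimuli_by_size filter from the record above. The paths are placeholders and the 'parameters' keys are assumptions, since the record imports the function but does not show its signature.

from pysaliency.dataset_config import load_dataset_from_config

config = {
    'stimuli': 'data/stimuli.hdf5',       # placeholder paths
    'fixations': 'data/fixations.hdf5',
    'filters': [
        {'type': 'filter_stimuli_by_size',
         'parameters': {'size': (768, 1024)}},  # assumed parameter name
    ],
}
stimuli, fixations = load_dataset_from_config(config)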
46e21ff57d47f1860d639972dc4eed1994a6cd50
remove print statements
crowd-course/scholars,crowd-course/scholars,crowd-course/scholars
scholars/authentication/pipeline.py
scholars/authentication/pipeline.py
import hashlib from social_core.exceptions import AuthAlreadyAssociated, AuthException def auto_logout(*args, **kwargs): """Do not compare current user with new one""" return {'user': None} def check_email_present(backend, uid, user=None, *args, **kwargs): if not kwargs['details'].get('email'): raise AuthException(backend, "Email wasn't provided by oauth provider") def social_user(backend, uid, user=None, *args, **kwargs): provider = backend.name social = backend.strategy.storage.user.get_social_auth(provider, uid) if social: # can happen when user has multiple accounts with same email (apply email uniqueness strictly) if user and social.user != user: msg = 'This {0} account is already in use.'.format(provider) raise AuthAlreadyAssociated(backend, msg) elif not user: user = social.user return {'social': social, 'user': user, 'is_new': user is None, 'new_association': social is None} def save_avatar(strategy, details, user=None, *args, **kwargs): """Get user avatar from social provider.""" if user: backend_name = kwargs['backend'].__class__.__name__.lower() response = kwargs.get('response', {}) avatar = None if 'google-oauth2' in backend_name and response.get('image', {}).get('url'): avatar = response['image']['url'].split('?')[0] else: avatar = 'http://www.gravatar.com/avatar/' avatar += hashlib.md5(user.email.lower().encode('utf8')).hexdigest() avatar += '?size=100' if avatar and user.avatar != avatar: user.avatar = avatar strategy.storage.user.changed(user)
import hashlib from social_core.exceptions import AuthAlreadyAssociated, AuthException def auto_logout(*args, **kwargs): """Do not compare current user with new one""" return {'user': None} def check_email_present(backend, uid, user=None, *args, **kwargs): if not kwargs['details'].get('email'): raise AuthException(backend, "Email wasn't provided by oauth provider") def social_user(backend, uid, user=None, *args, **kwargs): provider = backend.name social = backend.strategy.storage.user.get_social_auth(provider, uid) if social: # can happen when user has multiple accounts with same email (apply email uniqueness strictly) print user print social if user and social.user != user: msg = 'This {0} account is already in use.'.format(provider) raise AuthAlreadyAssociated(backend, msg) elif not user: user = social.user return {'social': social, 'user': user, 'is_new': user is None, 'new_association': social is None} def save_avatar(strategy, details, user=None, *args, **kwargs): """Get user avatar from social provider.""" if user: backend_name = kwargs['backend'].__class__.__name__.lower() response = kwargs.get('response', {}) avatar = None if 'google-oauth2' in backend_name and response.get('image', {}).get('url'): avatar = response['image']['url'].split('?')[0] else: avatar = 'http://www.gravatar.com/avatar/' avatar += hashlib.md5(user.email.lower().encode('utf8')).hexdigest() avatar += '?size=100' if avatar and user.avatar != avatar: user.avatar = avatar strategy.storage.user.changed(user)
mit
Python
28917935e5086ff6a03964babbb5c2e09957b582
Bump version
thombashi/pytablewriter
pytablewriter/__version__.py
pytablewriter/__version__.py
# encoding: utf-8 from datetime import datetime __author__ = "Tsuyoshi Hombashi" __copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__) __license__ = "MIT License" __version__ = "0.47.0" __maintainer__ = __author__ __email__ = "[email protected]"
# encoding: utf-8 from datetime import datetime __author__ = "Tsuyoshi Hombashi" __copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__) __license__ = "MIT License" __version__ = "0.46.3" __maintainer__ = __author__ __email__ = "[email protected]"
mit
Python
f5b8b4bafabc06504e2ee2e0571f2d8571db17bb
Update for v1.5.4
maxmind/MaxMind-DB-Reader-python,maxmind/MaxMind-DB-Reader-python,maxmind/MaxMind-DB-Reader-python
maxminddb/__init__.py
maxminddb/__init__.py
# pylint:disable=C0111 import os import maxminddb.reader try: import maxminddb.extension except ImportError: maxminddb.extension = None from maxminddb.const import ( MODE_AUTO, MODE_MMAP, MODE_MMAP_EXT, MODE_FILE, MODE_MEMORY, MODE_FD, ) from maxminddb.decoder import InvalidDatabaseError def open_database(database, mode=MODE_AUTO): """Open a MaxMind DB database Arguments: database -- A path to a valid MaxMind DB file such as a GeoIP2 database file, or a file descriptor in the case of MODE_FD. mode -- mode to open the database with. Valid modes are: * MODE_MMAP_EXT - use the C extension with memory map. * MODE_MMAP - read from memory map. Pure Python. * MODE_FILE - read database as standard file. Pure Python. * MODE_MEMORY - load database into memory. Pure Python. * MODE_FD - the param passed via database is a file descriptor, not a path. This mode implies MODE_MEMORY. * MODE_AUTO - tries MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order. Default mode. """ has_extension = maxminddb.extension and hasattr(maxminddb.extension, "Reader") if (mode == MODE_AUTO and has_extension) or mode == MODE_MMAP_EXT: if not has_extension: raise ValueError( "MODE_MMAP_EXT requires the maxminddb.extension module to be available" ) return maxminddb.extension.Reader(database) if mode in (MODE_AUTO, MODE_MMAP, MODE_FILE, MODE_MEMORY, MODE_FD): return maxminddb.reader.Reader(database, mode) raise ValueError("Unsupported open mode: {0}".format(mode)) def Reader(database): # pylint: disable=invalid-name """This exists for backwards compatibility. Use open_database instead""" return open_database(database) __title__ = "maxminddb" __version__ = "1.5.4" __author__ = "Gregory Oschwald" __license__ = "Apache License, Version 2.0" __copyright__ = "Copyright 2013-2019 Maxmind, Inc."
# pylint:disable=C0111 import os import maxminddb.reader try: import maxminddb.extension except ImportError: maxminddb.extension = None from maxminddb.const import ( MODE_AUTO, MODE_MMAP, MODE_MMAP_EXT, MODE_FILE, MODE_MEMORY, MODE_FD, ) from maxminddb.decoder import InvalidDatabaseError def open_database(database, mode=MODE_AUTO): """Open a Maxmind DB database Arguments: database -- A path to a valid MaxMind DB file such as a GeoIP2 database file, or a file descriptor in the case of MODE_FD. mode -- mode to open the database with. Valid mode are: * MODE_MMAP_EXT - use the C extension with memory map. * MODE_MMAP - read from memory map. Pure Python. * MODE_FILE - read database as standard file. Pure Python. * MODE_MEMORY - load database into memory. Pure Python. * MODE_FD - the param passed via database is a file descriptor, not a path. This mode implies MODE_MEMORY. * MODE_AUTO - tries MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order. Default mode. """ has_extension = maxminddb.extension and hasattr(maxminddb.extension, "Reader") if (mode == MODE_AUTO and has_extension) or mode == MODE_MMAP_EXT: if not has_extension: raise ValueError( "MODE_MMAP_EXT requires the maxminddb.extension module to be available" ) return maxminddb.extension.Reader(database) if mode in (MODE_AUTO, MODE_MMAP, MODE_FILE, MODE_MEMORY, MODE_FD): return maxminddb.reader.Reader(database, mode) raise ValueError("Unsupported open mode: {0}".format(mode)) def Reader(database): # pylint: disable=invalid-name """This exists for backwards compatibility. Use open_database instead""" return open_database(database) __title__ = "maxminddb" __version__ = "1.5.3" __author__ = "Gregory Oschwald" __license__ = "Apache License, Version 2.0" __copyright__ = "Copyright 2013-2019 Maxmind, Inc."
apache-2.0
Python
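A short usage sketch for the open_database API shown above; the database path is a placeholder for any valid MaxMind DB file.

import maxminddb

reader = maxminddb.open_database('GeoLite2-City.mmdb', maxminddb.MODE_AUTO)
record = reader.get('128.101.101.101')  # a dict of data, or None if absent
reader.close()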
5f9da62f28e61636f33495058f3ea4a98a9d3c19
add invalid separators to test
geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/osmaxx,geometalab/osmaxx,geometalab/osmaxx-frontend
tests/inside_worker_test/cast_to_float_or_null_test.py
tests/inside_worker_test/cast_to_float_or_null_test.py
import pytest import sqlalchemy from tests.inside_worker_test.conftest import slow @pytest.fixture(params=[2, 2.2, 3.898986, 0.6, 0]) def valid_float_representation(request): return request.param @pytest.fixture(params=["a2", "10b", "3.898986c", "3d.898986", "e6.9", "f0,9", "0,g9", "0,9h", "0,6", "123'456", "1 290", None]) def invalid_floats(request): return request.param @slow def test_cast_to_float_null_if_failed_returns_floats_with_valid_floats(osmaxx_functions, valid_float_representation): engine = osmaxx_functions result = engine.execute( sqlalchemy.text( "select cast_to_float_null_if_failed($${}$$) as float_value;".format(valid_float_representation) ).execution_options(autocommit=True) ) assert result.rowcount == 1 results = result.fetchall() assert len(results) == 1 assert results[0]['float_value'] == float(valid_float_representation) @slow def test_cast_to_float_null_if_failed_returns_null_with_invalid_floats(osmaxx_functions, invalid_floats): engine = osmaxx_functions result = engine.execute( sqlalchemy.text( "select cast_to_float_null_if_failed($${}$$) as float_value;".format(invalid_floats) ).execution_options(autocommit=True) ) assert result.rowcount == 1 results = result.fetchall() assert len(results) == 1 assert results[0]['float_value'] is None
import pytest import sqlalchemy from tests.inside_worker_test.conftest import slow @pytest.fixture(params=[2, 2.2, 3.898986, "3.898986", "6", "0.2", 0.6]) def valid_float_representation(request): return request.param @pytest.fixture(params=["a2", "10b", "3.898986k", "3k.898986", "l6.9"]) def invalid_floats(request): return request.param @slow def test_cast_to_float_null_if_failed_returns_floats_with_valid_floats(osmaxx_functions, valid_float_representation): engine = osmaxx_functions result = engine.execute( sqlalchemy.text( "select cast_to_float_null_if_failed($${}$$) as float_value;".format(valid_float_representation) ).execution_options(autocommit=True) ) assert result.rowcount == 1 results = result.fetchall() assert len(results) == 1 assert results[0]['float_value'] == float(valid_float_representation) @slow def test_cast_to_float_null_if_failed_returns_null_with_invalid_floats(osmaxx_functions, invalid_floats): engine = osmaxx_functions result = engine.execute( sqlalchemy.text( "select cast_to_float_null_if_failed($${}$$) as float_value;".format(invalid_floats) ).execution_options(autocommit=True) ) assert result.rowcount == 1 results = result.fetchall() assert len(results) == 1 assert results[0]['float_value'] is None
mit
Python
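The new fixtures above track plain Python float() semantics: thousands separators, decimal commas, and embedded spaces are all rejected, which is exactly what the SQL helper under test should map to NULL. A minimal demonstration:

for text in ("0,6", "123'456", "1 290"):
    try:
        float(text)
    except ValueError:
        print(text, '-> would become NULL')
print(float('3.898986'))  # a representation float() does accept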
b3e1b6bd9f79427142ebfe4b57892d1cf3a89e86
Implement the latest test spec for update which requires most of the parameters found in an example usage of mlab-ns against npad.
m-lab/ooni-support,hellais/ooni-support,m-lab/ooni-support,hellais/ooni-support
mlab-ns-simulator/mlabsim/update.py
mlab-ns-simulator/mlabsim/update.py
""" This approximates the mlab-ns slice information gathering. The actual system uses nagios and we're not certain about the details. This much simplified version is just a web URL anyone may PUT data into. Warning: This doesn't have any security properties! We need a way to prevent the addition of malicious entries. """ import json from twisted.web import resource from twisted.web.server import NOT_DONE_YET DBEntryNames = [ 'city', 'country', 'fqdn', 'ip', 'port', 'site', 'tool_extra', ] class UpdateResource (resource.Resource): def __init__(self, db): """db is a dict which will be modified to map { fqdn -> other_details }""" resource.Resource.__init__(self) self._db = db def render_PUT(self, request): dbentry = {} for name in DBEntryNames: # BUG: Multiple values not handled nor tested: [value] = request.args[name] if name == 'tool_extra': try: value = json.loads(value) except ValueError: request.setResponseCode(400, 'invalid') request.finish() return NOT_DONE_YET dbentry[name] = value self._db[dbentry['fqdn']] = dbentry request.setResponseCode(200, 'ok') request.finish() return NOT_DONE_YET
""" This approximates the mlab-ns slice information gathering. The actual system uses nagios and we're not certain about the details. This much simplified version is just a web URL anyone may PUT data into. Warning: This doesn't have any security properties! We need a way to prevent the addition of malicious entries. """ import json from twisted.web import resource from twisted.web.server import NOT_DONE_YET class UpdateResource (resource.Resource): def __init__(self, db): """db is a dict which will be modified to map { fqdn -> other_details }""" resource.Resource.__init__(self) self._db = db def render_PUT(self, request): [fqdn, tool_extra_json] = self._parse_args(request.args) try: tool_extra = json.loads(tool_extra_json) except ValueError: request.setResponseCode(400, 'invalid') request.finish() else: self._db[fqdn] = {'tool_extra': tool_extra} request.setResponseCode(200, 'ok') request.finish() return NOT_DONE_YET def _parse_args(self, args): for name in ['fqdn', 'tool_extra']: [val] = args[name] yield val
apache-2.0
Python
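A hedged client-side sketch for the PUT handler above. Twisted fills request.args from the query string on PUT requests, so the fields are sent as URL parameters here; the host, port, and '/update' path are assumptions not shown in the record.

import json
import requests

params = {
    'city': 'Zurich', 'country': 'CH',
    'fqdn': 'npad.iupui.mlab1.zrh01.measurement-lab.org',
    'ip': '192.0.2.10', 'port': '8000', 'site': 'zrh01',
    'tool_extra': json.dumps({'max_clients': 10}),  # must be valid JSON
}
resp = requests.put('http://localhost:8080/update', params=params)
assert resp.status_code == 200  # the handler replies 200 'ok' on success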
45a24fae9f5e1ee24c2e0283746224e51f718cc2
Remove redundant test of permissions parameter
simonjbeaumont/planex,djs55/planex,simonjbeaumont/planex,djs55/planex,simonjbeaumont/planex,djs55/planex
planex/tree.py
planex/tree.py
""" In-memory 'filesystem' library """ import os class Tree(object): """ An in-memory 'filesystem' which accumulates file changes to be written later. """ def __init__(self): self.tree = {} def append(self, filename, contents=None, permissions=None): """ Append contents to filename in the in-memory filesystem. """ node = self.tree.get(filename, {}) if contents: node['contents'] = node.get('contents', '') + contents if permissions: if 'permissions' in node and \ node['permissions'] != permissions: raise Exception("Inconsistent permissions for '%s'" % filename) node['permissions'] = permissions self.tree[filename] = node def apply(self, basepath): """ Save in-memory filesystem to disk. """ for subpath, node in self.tree.items(): permissions = node.get("permissions", 0o644) contents = node.get("contents", "") fullpath = os.path.join(basepath, subpath) if not os.path.isdir(os.path.dirname(fullpath)): os.makedirs(os.path.dirname(fullpath)) out = os.open(os.path.join(basepath, subpath), os.O_WRONLY | os.O_CREAT, permissions) os.write(out, contents) os.close(out) def __repr__(self): res = "" for subpath, node in self.tree.items(): permissions = node.get("permissions", 0o644) contents = node.get("contents", "") res += "%s (0o%o):\n" % (subpath, permissions) res += contents res += "\n\n" return res
""" In-memory 'filesystem' library """ import os class Tree(object): """ An in-memory 'filesystem' which accumulates file changes to be written later. """ def __init__(self): self.tree = {} def append(self, filename, contents=None, permissions=None): """ Append contents to filename in the in-memory filesystem. """ node = self.tree.get(filename, {}) if contents: node['contents'] = node.get('contents', '') + contents if permissions: if 'permissions' in node and \ node['permissions'] != permissions: raise Exception("Inconsistent permissions for '%s'" % filename) if permissions: node['permissions'] = permissions else: node['permissions'] = 0o644 self.tree[filename] = node def apply(self, basepath): """ Save in-memory filesystem to disk. """ for subpath, node in self.tree.items(): permissions = node.get("permissions", 0o644) contents = node.get("contents", "") fullpath = os.path.join(basepath, subpath) if not os.path.isdir(os.path.dirname(fullpath)): os.makedirs(os.path.dirname(fullpath)) out = os.open(os.path.join(basepath, subpath), os.O_WRONLY | os.O_CREAT, permissions) os.write(out, contents) os.close(out) def __repr__(self): res = "" for subpath, node in self.tree.items(): permissions = node.get("permissions", 0o644) contents = node.get("contents", "") res += "%s (0o%o):\n" % (subpath, permissions) res += contents res += "\n\n" return res
lgpl-2.1
Python
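A usage sketch for the Tree class above: appends accumulate in memory, permissions now default to 0o644 at write time, and apply() materialises everything under a base path.

from planex.tree import Tree

tree = Tree()
tree.append('etc/app.conf', contents='key = value\n')  # mode defaults to 0o644
tree.append('bin/run.sh', contents='#!/bin/sh\n', permissions=0o755)
tree.append('bin/run.sh', contents='echo hello\n')     # same file, appended
tree.apply('/tmp/staging')  # write the accumulated files to disk
print(tree)  # __repr__ lists each path with its octal mode and contents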
7ad7f0231bc50c58f9b606cbab36d6cd98e141ec
Make the error message clearer (#944)
akaszynski/vtkInterface
pyvista/plotting/__init__.py
pyvista/plotting/__init__.py
"""Plotting routines.""" from .colors import (color_char_to_word, get_cmap_safe, hex_to_rgb, hexcolors, string_to_rgb, PARAVIEW_BACKGROUND) from .export_vtkjs import export_plotter_vtkjs, get_vtkjs_url from .helpers import plot, plot_arrows, plot_compare_four, plot_itk from .itkplotter import PlotterITK from .plotting import BasePlotter, Plotter, close_all from .renderer import CameraPosition, Renderer, scale_point from .theme import (DEFAULT_THEME, FONT_KEYS, MAX_N_COLOR_BARS, parse_color, parse_font_family, rcParams, set_plot_theme) from .tools import (create_axes_marker, create_axes_orientation_box, opacity_transfer_function, system_supports_plotting) from .widgets import WidgetHelper class QtDeprecationError(Exception): """Depreciation Error for features that moved to `pyvistaqt`.""" message = """`{}` has moved to pyvistaqt. You can install this from PyPI with: `pip install pyvistaqt` Then import it via: `from pyvistaqt import {}` `{}` is no longer accessible by `pyvista.{}` See https://github.com/pyvista/pyvistaqt """ def __init__(self, feature_name): """Empty init.""" Exception.__init__(self, self.message.format(*[feature_name] * 4)) class BackgroundPlotter(): """This class has been moved to pyvistaqt.""" def __init__(self, *args, **kwargs): """Empty init.""" raise QtDeprecationError('BackgroundPlotter') class QtInteractor(): """This class has been moved to pyvistaqt.""" def __init__(self, *args, **kwargs): """Empty init.""" raise QtDeprecationError('QtInteractor')
"""Plotting routines.""" from .colors import (color_char_to_word, get_cmap_safe, hex_to_rgb, hexcolors, string_to_rgb, PARAVIEW_BACKGROUND) from .export_vtkjs import export_plotter_vtkjs, get_vtkjs_url from .helpers import plot, plot_arrows, plot_compare_four, plot_itk from .itkplotter import PlotterITK from .plotting import BasePlotter, Plotter, close_all from .renderer import CameraPosition, Renderer, scale_point from .theme import (DEFAULT_THEME, FONT_KEYS, MAX_N_COLOR_BARS, parse_color, parse_font_family, rcParams, set_plot_theme) from .tools import (create_axes_marker, create_axes_orientation_box, opacity_transfer_function, system_supports_plotting) from .widgets import WidgetHelper class QtDeprecationError(Exception): """Depreciation Error for features that moved to `pyvistaqt`.""" message = """`{}` has moved to pyvistaqt. You can install this from PyPI with: `pip install pyvistaqt` See https://github.com/pyvista/pyvistaqt """ def __init__(self, feature_name): """Empty init.""" Exception.__init__(self, self.message.format(feature_name)) class BackgroundPlotter(): """This class has been moved to pyvistaqt.""" def __init__(self, *args, **kwargs): """Empty init.""" raise QtDeprecationError('BackgroundPlotter') class QtInteractor(): """This class has been moved to pyvistaqt.""" def __init__(self, *args, **kwargs): """Empty init.""" raise QtDeprecationError('QtInteractor')
mit
Python
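A minimal demonstration of the clearer failure mode introduced above, assuming pyvista still re-exports BackgroundPlotter at the top level:

import pyvista as pv

try:
    pv.BackgroundPlotter()
except pv.plotting.QtDeprecationError as err:
    print(err)  # names the class, the pip install, and the new import path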
f53e7452676e6ee903a4d8c350fa356a718a5fcc
Add a test for file: and path: searches for non-ASCII things.
pelmers/dxr,pelmers/dxr,pelmers/dxr,pelmers/dxr,pelmers/dxr,pelmers/dxr,pelmers/dxr
tests/test_path_file_filters/test_path_file_filters.py
tests/test_path_file_filters/test_path_file_filters.py
# -*- coding: utf-8 -*- from nose.tools import raises from dxr.testing import DxrInstanceTestCase class PathAndFileFilterTests(DxrInstanceTestCase): """Basic tests for functionality of the 'path:' and 'file:' filters""" def test_basic_path_results(self): """Check that a 'path:' result includes both file and folder matches.""" self.found_files_eq('path:fish', ['fish1', 'fishy_folder/fish2', 'fishy_folder/gill', 'folder/fish3', 'folder/fish4']) def test_basic_file_results(self): """Check that a 'file:' result includes only file matches.""" self.found_files_eq('file:fish', ['fish1', 'fishy_folder/fish2', 'folder/fish3', 'folder/fish4']) def test_path_and_file_line_promotion(self): """Make sure promotion of a 'path:' or 'file:' filter to a LINE query works. """ self.found_files_eq('path:fish fins', ['folder/fish3']) self.found_files_eq('file:fish fins', ['folder/fish3']) # This fails because we currently intentionally exclude folder paths from # FILE query results - remove the @raises line when that's changed. (Of # course then other tests here will need to be updated as well.) @raises(AssertionError) def test_empty_folder_path_results(self): """Check that 'path:' results include empty folders.""" self.found_files_eq('path:empty_folder', ['empty_folder']) def test_basic_wildcard(self): """Test basic wildcard functionality.""" # 'path:' and 'file:' currently have the same underlying wildcard # support, so we're spreading out the basic wildcard testing over both. self.found_files_eq('path:fish?_fo*er', ['fishy_folder/fish2', 'fishy_folder/gill']) self.found_files_eq('file:fish[14]', ['fish1', 'folder/fish4']) def test_unicode(self): """Make sure searching for non-ASCII names works.""" self.found_files_eq(u'file:fre\u0301mium*', [u'fre\u0301mium.txt']) # This one fails because é is normalized differently in ES than here: # self.found_files_eq(u'file:frémium*', [u'frémium.txt'])
from nose.tools import raises from dxr.testing import DxrInstanceTestCase class PathAndFileFilterTests(DxrInstanceTestCase): """Basic tests for functionality of the 'path:' and 'file:' filters""" def test_basic_path_results(self): """Check that a 'path:' result includes both file and folder matches.""" self.found_files_eq('path:fish', ['fish1', 'fishy_folder/fish2', 'fishy_folder/gill', 'folder/fish3', 'folder/fish4']) def test_basic_file_results(self): """Check that a 'file:' result includes only file matches.""" self.found_files_eq('file:fish', ['fish1', 'fishy_folder/fish2', 'folder/fish3', 'folder/fish4']) def test_path_and_file_line_promotion(self): """Make sure promotion of a 'path:' or 'file:' filter to a LINE query works. """ self.found_files_eq('path:fish fins', ['folder/fish3']) self.found_files_eq('file:fish fins', ['folder/fish3']) # This fails because we currently intentionally exclude folder paths from # FILE query results - remove the @raises line when that's changed. (Of # course then other tests here will need to be updated as well.) @raises(AssertionError) def test_empty_folder_path_results(self): """Check that 'path:' results include empty folders.""" self.found_files_eq('path:empty_folder', ['empty_folder']) def test_basic_wildcard(self): """Test basic wildcard functionality.""" # 'path:' and 'file:' currently have the same underlying wildcard # support, so we're spreading out the basic wildcard testing over both. self.found_files_eq('path:fish?_fo*er', ['fishy_folder/fish2', 'fishy_folder/gill']) self.found_files_eq('file:fish[14]', ['fish1', 'folder/fish4'])
mit
Python
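Why the commented-out assertion in the test above fails: the same visible name can be two different code-point sequences, and the search index may store one normal form while the test supplies the other. A small illustration with the standard library:

import unicodedata

decomposed = u'fre\u0301mium'  # 'e' plus combining acute accent (NFD form)
composed = unicodedata.normalize('NFC', decomposed)  # single '\xe9' code point
assert composed == u'fr\xe9mium'
assert composed != decomposed  # identical to the eye, unequal as strings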
d22bd8970b973fb58f1358b62cf8c27f826aa407
update example
hhatto/pgmagick,hhatto/pgmagick,hhatto/pgmagick
example/gravity.py
example/gravity.py
from pgmagick import Image, Geometry, Color, TypeMetric, \ DrawableText, DrawableList, DrawableGravity, GravityType im = Image(Geometry(600, 600), Color("transparent")) im.fontPointsize(30) im.fillColor(Color("#f010f0")) im.strokeColor(Color("transparent")) im.font("Vera.ttf") dl = DrawableList() dl.append(DrawableGravity(GravityType.CenterGravity)) dl.append(DrawableText(0, 0, "center")) tm = TypeMetric() im.fontTypeMetrics("northn", tm) font_height = tm.textHeight() dl.append(DrawableGravity(GravityType.NorthGravity)) dl.append(DrawableText(0, font_height / 2., "north")) dl.append(DrawableGravity(GravityType.WestGravity)) dl.append(DrawableText(0, 0, "west")) dl.append(DrawableGravity(GravityType.EastGravity)) dl.append(DrawableText(0, 0, "east")) dl.append(DrawableText(0, 20, "east-long")) dl.append(DrawableGravity(GravityType.SouthGravity)) dl.append(DrawableText(0, 0, "south")) dl.append(DrawableGravity(GravityType.NorthWestGravity)) dl.append(DrawableText(0, font_height / 2., "north-west")) dl.append(DrawableGravity(GravityType.NorthEastGravity)) dl.append(DrawableText(0, font_height / 2., "north-east")) dl.append(DrawableGravity(GravityType.SouthWestGravity)) dl.append(DrawableText(0, 0, "south-west")) dl.append(DrawableGravity(GravityType.SouthEastGravity)) dl.append(DrawableText(0, 0, "south-east")) im.draw(dl) im.write("test.png")
from pgmagick import Image, Geometry, Color, TypeMetric, \ DrawableText, DrawableList, DrawableGravity, GravityType im = Image(Geometry(600, 600), Color("transparent")) im.fontPointsize(30) im.fillColor(Color("#f010f0")) im.strokeColor(Color("transparent")) im.font("Vera.ttf") dl = DrawableList() dl.append(DrawableGravity(GravityType.CenterGravity)) dl.append(DrawableText(0, 0, "center")) tm = TypeMetric() im.fontTypeMetrics("northn", tm) font_height = tm.textHeight() dl.append(DrawableGravity(GravityType.NorthGravity)) dl.append(DrawableText(0, font_height / 2., "north")) dl.append(DrawableGravity(GravityType.WestGravity)) dl.append(DrawableText(0, 0, "west")) dl.append(DrawableGravity(GravityType.EastGravity)) dl.append(DrawableText(0, 0, "east")) dl.append(DrawableGravity(GravityType.SouthGravity)) dl.append(DrawableText(0, 0, "south")) dl.append(DrawableGravity(GravityType.NorthWestGravity)) dl.append(DrawableText(0, font_height / 2., "north-west")) dl.append(DrawableGravity(GravityType.NorthEastGravity)) dl.append(DrawableText(0, font_height / 2., "north-east")) dl.append(DrawableGravity(GravityType.SouthWestGravity)) dl.append(DrawableText(0, 0, "south-west")) dl.append(DrawableGravity(GravityType.SouthEastGravity)) dl.append(DrawableText(0, 0, "south-east")) im.draw(dl) im.write("test.png")
mit
Python
73e99078b3bce587e059b1a15dbb7f94be70dd8d
enable the possibility of success
open-power/op-test-framework,open-power/op-test-framework,open-power/op-test-framework
testcases/OpalMsglog.py
testcases/OpalMsglog.py
#!/usr/bin/python2 # OpenPOWER Automated Test Project # # Contributors Listed Below - COPYRIGHT 2017 # [+] International Business Machines Corp. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # import unittest import OpTestConfiguration from common.OpTestSystem import OpSystemState from common.OpTestConstants import OpTestConstants as BMC_CONST from common.Exceptions import CommandFailed class OpalMsglog(): def setUp(self): conf = OpTestConfiguration.conf self.cv_HOST = conf.host() self.cv_IPMI = conf.ipmi() self.cv_SYSTEM = conf.system() def runTest(self): self.setup_test() try: log_entries = self.c.run_command("grep ',[0-4]\]' /sys/firmware/opal/msglog") msg = '\n'.join(filter(None, log_entries)) self.assertTrue( len(log_entries) == 0, "Warnings/Errors in OPAL log:\n%s" % msg) except CommandFailed as cf: if cf.exitcode == 1 and len(cf.output) == 0: pass else: raise cf class Skiroot(OpalMsglog, unittest.TestCase): def setup_test(self): self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL) self.c = self.cv_SYSTEM.sys_get_ipmi_console() self.cv_SYSTEM.host_console_unique_prompt() class Host(OpalMsglog, unittest.TestCase): def setup_test(self): self.cv_SYSTEM.goto_state(OpSystemState.OS) self.c = self.cv_SYSTEM.host().get_ssh_connection()
#!/usr/bin/python2 # OpenPOWER Automated Test Project # # Contributors Listed Below - COPYRIGHT 2017 # [+] International Business Machines Corp. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # import unittest import OpTestConfiguration from common.OpTestSystem import OpSystemState from common.OpTestConstants import OpTestConstants as BMC_CONST class OpalMsglog(): def setUp(self): conf = OpTestConfiguration.conf self.cv_HOST = conf.host() self.cv_IPMI = conf.ipmi() self.cv_SYSTEM = conf.system() def runTest(self): self.setup_test() log_entries = self.c.run_command("grep ',[0-4]\]' /sys/firmware/opal/msglog") msg = '\n'.join(filter(None, log_entries)) self.assertTrue( len(log_entries) == 0, "Warnings/Errors in OPAL log:\n%s" % msg) class Skiroot(OpalMsglog, unittest.TestCase): def setup_test(self): self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL) self.c = self.cv_SYSTEM.sys_get_ipmi_console() self.cv_SYSTEM.host_console_unique_prompt() class Host(OpalMsglog, unittest.TestCase): def setup_test(self): self.cv_SYSTEM.goto_state(OpSystemState.OS) self.c = self.cv_SYSTEM.host().get_ssh_connection()
apache-2.0
Python
6cb0822aade07999d54e5fcd19eb2c7322abc80a
Improve performance @ Measurement Admin
sigurdsa/angelika-api
measurement/admin.py
measurement/admin.py
from django.contrib import admin from .models import Measurement class MeasurementAdmin(admin.ModelAdmin): model = Measurement def get_queryset(self, request): return super(MeasurementAdmin, self).get_queryset(request).select_related('patient__user') admin.site.register(Measurement, MeasurementAdmin)
from django.contrib import admin from .models import Measurement admin.site.register(Measurement)
mit
Python
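The one-line change above is the classic N+1 fix: without select_related, rendering each admin row triggers extra queries to fetch patient and its user. In miniature, assuming the model import path from the record:

from measurement.models import Measurement

# Without select_related: one query for the list, plus extra queries per row
# when the admin touches related fields.
for m in Measurement.objects.all():
    _ = m.patient.user

# With select_related: a single JOINed query covers all three tables.
for m in Measurement.objects.select_related('patient__user'):
    _ = m.patient.user  # no additional queries per row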
24b2509b1605dfd6d3eb325ed946c3d23441b969
use Python's QT stuff
visionegg/visionegg,visionegg/visionegg,visionegg/visionegg,visionegg/visionegg,visionegg/visionegg
demo/quicktime.py
demo/quicktime.py
#!/usr/bin/env python """Display quicktime movie.""" import os, sys import VisionEgg from VisionEgg.Core import * from VisionEgg.Text import * from VisionEgg.Textures import * from VisionEgg.QuickTime import new_movie_from_filename, MovieTexture screen = get_default_screen() screen.set(bgcolor=(0,0,0)) if len(sys.argv) > 1: filename = sys.argv[1] else: filename = os.path.join(VisionEgg.config.VISIONEGG_SYSTEM_DIR,"data","water.mov") movie = new_movie_from_filename(filename) # movie is type Carbon.Qt.Movie bounds = movie.GetMovieBox() width = bounds[2]-bounds[0] height = bounds[3]-bounds[1] scale_x = screen.size[0]/float(width) scale_y = screen.size[1]/float(height) scale = min(scale_x,scale_y) # maintain aspect ratio movie_texture = MovieTexture(movie=movie) stimulus = TextureStimulus( texture=movie_texture, position = (screen.size[0]/2.0,screen.size[1]/2.0), anchor = 'center', mipmaps_enabled = False, # can't do mipmaps with QuickTime movies shrink_texture_ok = True, size = (width*scale, height*scale), ) text = Text( text = "Vision Egg QuickTime movie demo - Press any key to quit", position = (screen.size[0]/2,screen.size[1]), anchor = 'top', color = (1.0, 1.0, 1.0), ) viewport = Viewport(screen=screen, stimuli=[stimulus, text]) movie.StartMovie() frame_timer = FrameTimer() while not pygame.event.peek((pygame.locals.QUIT, pygame.locals.KEYDOWN, pygame.locals.MOUSEBUTTONDOWN)): movie.MoviesTask(0) screen.clear() viewport.draw() swap_buffers() # display the frame we've drawn in back buffer frame_timer.tick() if movie.IsMovieDone(): movie.GoToBeginningOfMovie() frame_timer.print_histogram()
#!/usr/bin/env python """Display quicktime movie.""" import os import VisionEgg from VisionEgg.Core import * from VisionEgg.Text import * from VisionEgg.Textures import * from VisionEgg.QuickTime import * screen = get_default_screen() screen.set(bgcolor=(0,0,0)) filename = os.path.join(VisionEgg.config.VISIONEGG_SYSTEM_DIR,"data","water.mov") movie = Movie(filename) left, bottom, right, top = movie.get_box() width,height = abs(right-left), abs(top-bottom) scale_x = screen.size[0]/float(width) scale_y = screen.size[1]/float(height) scale = min(scale_x,scale_y) # maintain aspect ratio movie_texture = MovieTexture(movie=movie) stimulus = TextureStimulus( texture=movie_texture, position = (screen.size[0]/2.0,screen.size[1]/2.0), anchor = 'center', mipmaps_enabled = False, # can't do mipmaps with QuickTime movies shrink_texture_ok = True, size = (width*scale, height*scale), ) text = Text( text = "Vision Egg QuickTime movie demo - Press any key to quit", position = (screen.size[0]/2,screen.size[1]), anchor = 'top', color = (1.0, 1.0, 1.0), ) viewport = Viewport(screen=screen, stimuli=[stimulus, text]) movie.start() frame_timer = FrameTimer() while not pygame.event.peek((pygame.locals.QUIT, pygame.locals.KEYDOWN, pygame.locals.MOUSEBUTTONDOWN)): movie.task() screen.clear() viewport.draw() swap_buffers() # display the frame we've drawn in back buffer frame_timer.tick() if movie.is_done(): movie.go_to_beginning() frame_timer.print_histogram()
lgpl-2.1
Python
c92caa1f00c984cf839ccf7c645d207e100eb874
Add test_invalid_image to test_image_validation module
mozilla-services/pageshot,mozilla-services/screenshots,mozilla-services/screenshots,mozilla-services/pageshot,mozilla-services/screenshots,mozilla-services/pageshot,mozilla-services/pageshot,mozilla-services/screenshots
test/server/test_image_validation.py
test/server/test_image_validation.py
from urlparse import urljoin from clientlib import ( make_example_shot, make_random_id, screenshots_session, example_images ) import random, string # Hack to make this predictable: random.seed(0) def test_invalid_image_url(): with screenshots_session() as user: shot_id = make_random_id() + "/test.com" shot_data = urljoin(user.backend, "data/" + shot_id) shot_json = make_example_shot(user.deviceId) invalid_url = "https://example.com/?aaA=bbb=\"); background-color: red;" for clip_id in shot_json['clips']: shot_json['clips'][clip_id]['image']['url'] = invalid_url break resp = user.session.put( shot_data, json=shot_json, ) print(resp.text) assert resp.status_code == 500 # assertion failure on clip image url def test_invalid_data_image(): with screenshots_session() as user: shot_id = make_random_id() + "/test.com" shot_data = urljoin(user.backend, "data/" + shot_id) shot_json = make_example_shot(user.deviceId) valid_data_image = example_images['url'] if "iVBORw0KGgo" in valid_data_image: print(valid_data_image) def test_invalid_data_image_decoded(): pass def test_invalid_data_url(): pass if __name__ == "__main__": test_invalid_data_image() test_invalid_data_image_decoded() test_invalid_data_url()
from urlparse import urljoin from clientlib import ( make_example_shot, make_random_id, screenshots_session, example_images ) import random # Hack to make this predictable: random.seed(0) def test_invalid_image_url(): with screenshots_session() as user: shot_id = make_random_id() + "/test.com" shot_data = urljoin(user.backend, "data/" + shot_id) shot_json = make_example_shot(user.deviceId) invalid_url = "https://example.com/?aaA=bbb=\"); background-color: red;" for clip_id in shot_json['clips']: shot_json['clips'][clip_id]['image']['url'] = invalid_url break resp = user.session.put( shot_data, json=shot_json, ) print(resp.text) assert resp.status_code == 500 # assertion failure on clip image url def test_invalid_data_image(): with screenshots_session() as user: shot_url = user.create_shot(docTitle="TEST_JPEG", image_content_type="application/pdf", image_index=0) shot_page = user.read_shot(shot_url) assert shot_page["clip_content_type"] != "image/jpeg" def test_invalid_data_image_decoded(): pass def test_invalid_data_url(): pass if __name__ == "__main__": test_invalid_data_image() test_invalid_data_image_decoded() test_invalid_data_url()
mpl-2.0
Python
33121b74419e9913e46e183914805d4a9db8f742
fix test to look for email instead of username
nicole-a-tesla/meetup.pizza,nicole-a-tesla/meetup.pizza
meetuppizza/tests.py
meetuppizza/tests.py
from django.test import TestCase from django.contrib.auth.models import User from django.test import Client from meetuppizza.forms import RegistrationForm import pdb class Test(TestCase): def setUp(self): self.params = { 'username':'Bjorn', 'email':'[email protected]', 'password1':'bjornbjorn', 'password2':'bjornbjorn' } def test_landing_page_is_there(self): response = self.client.get('/') self.assertEqual(response.status_code, 200) def test_page_contains_pizza(self): response = self.client.get('/') self.assertContains(response, "pizza") def test_signup_redirects(self): response = self.client.post('/sign_up', self.params, follow=True) self.assertRedirects(response, '/welcome') def test_user_is_created(self): c = Client() c.post('/sign_up', self.params) self.assertEqual(1, len(User.objects.all())) def test_user_is_logged_in_after_signup(self): c = Client() c.post('/sign_up', self.params) user = User.objects.get(username='Bjorn') self.assertTrue(user.is_authenticated()) def test_email_displayed_on_welcome_page(self): c = Client() c.post('/sign_up', self.params) response = c.get('/welcome') self.assertContains(response, "[email protected]")
from django.test import TestCase from django.contrib.auth.models import User from django.test import Client from meetuppizza.forms import RegistrationForm import pdb class Test(TestCase): def setUp(self): self.params = { 'username':'Bjorn', 'email':'[email protected]', 'password1':'bjornbjorn', 'password2':'bjornbjorn' } def test_landing_page_is_there(self): response = self.client.get('/') self.assertEqual(response.status_code, 200) def test_page_contains_pizza(self): response = self.client.get('/') self.assertContains(response, "pizza") def test_signup_redirects(self): response = self.client.post('/sign_up', self.params, follow=True) self.assertRedirects(response, '/welcome') def test_user_is_created(self): c = Client() c.post('/sign_up', self.params) self.assertEqual(1, len(User.objects.all())) def test_user_is_logged_in_after_signup(self): c = Client() c.post('/sign_up', self.params) user = User.objects.get(username='Bjorn') self.assertTrue(user.is_authenticated()) def test_email_displayed_on_welcome_page(self): c = Client() c.post('/sign_up', self.params) response = c.get('/welcome') self.assertContains(response, "Bjorn")
mit
Python
bf4cf008fb8eadd5a0b8b23a330a49fdea272314
Convert exception to string
Kitware/cumulus,Kitware/cumulus
tests/cases/cloud_provider_test.py
tests/cases/cloud_provider_test.py
import unittest import os from cumulus.ansible.tasks.providers import CloudProvider, EC2Provider class CloudProviderTestCase(unittest.TestCase): def setup(self): pass def tearDown(self): pass def test_empty_profile(self): with self.assertRaises(AssertionError) as context: p = CloudProvider({}) self.assertTrue('Profile does not have a "cloudProvider" attribute' in str(context.exception)) def test_ec2_profile(self): p = CloudProvider({'cloudProvider': 'ec2'}) self.assertTrue(isinstance(p, EC2Provider))
import unittest import os from cumulus.ansible.tasks.providers import CloudProvider, EC2Provider class CloudProviderTestCase(unittest.TestCase): def setup(self): pass def tearDown(self): pass def test_empty_profile(self): with self.assertRaises(AssertionError) as context: p = CloudProvider({}) self.assertTrue('Profile does not have a "cloudProvider" attribute' in context.exception) def test_ec2_profile(self): p = CloudProvider({'cloudProvider': 'ec2'}) self.assertTrue(isinstance(p, EC2Provider))
apache-2.0
Python
0ac1cdfd59199d3c36ddbccc7c5004261b57f7be
Add api.python.failing_step
shishkander/recipes-py,shishkander/recipes-py,luci/recipes-py,luci/recipes-py
recipe_modules/python/api.py
recipe_modules/python/api.py
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from slave import recipe_api from slave import recipe_util import textwrap class PythonApi(recipe_api.RecipeApi): def __call__(self, name, script, args=None, unbuffered=True, **kwargs): """Return a step to run a python script with arguments.""" cmd = ['python'] if unbuffered: cmd.append('-u') cmd.append(script) return self.m.step(name, cmd + list(args or []), **kwargs) def inline(self, name, program, add_python_log=True, **kwargs): """Run an inline python program as a step. Program is output to a temp file and run when this step executes. """ program = textwrap.dedent(program) compile(program, '<string>', 'exec', dont_inherit=1) try: self(name, self.m.raw_io.input(program, '.py'), **kwargs) finally: result = self.m.step.active_result if add_python_log: result.presentation.logs['python.inline'] = program.splitlines() return result def failing_step(self, name, text): """Return a failing step (correctly recognized in expectations).""" try: self.inline(name, 'import sys; sys.exit(1)', add_python_log=False, step_test_data=lambda: self.m.raw_io.test_api.output( text, retcode=1)) finally: self.m.step.active_result.presentation.step_text = text
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from slave import recipe_api from slave import recipe_util import textwrap class PythonApi(recipe_api.RecipeApi): def __call__(self, name, script, args=None, unbuffered=True, **kwargs): """Return a step to run a python script with arguments.""" cmd = ['python'] if unbuffered: cmd.append('-u') cmd.append(script) return self.m.step(name, cmd + list(args or []), **kwargs) def inline(self, name, program, add_python_log=True, **kwargs): """Run an inline python program as a step. Program is output to a temp file and run when this step executes. """ program = textwrap.dedent(program) compile(program, '<string>', 'exec', dont_inherit=1) try: self(name, self.m.raw_io.input(program, '.py'), **kwargs) finally: result = self.m.step.active_result if add_python_log: result.presentation.logs['python.inline'] = program.splitlines() return result
bsd-3-clause
Python
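A sketch of how a recipe might call the new helper above; the DEPS declarations and module plumbing are omitted, and the 'revision' property name is illustrative only.

def RunSteps(api):
    if not api.properties.get('revision'):  # illustrative property name
        api.python.failing_step(
            'check revision', 'no revision property was supplied')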
b1653d7a9589766a86141034865d9023b1f75fad
Fix as_tensor_test
ajbouh/tfi
tests/as_tensor_test.py
tests/as_tensor_test.py
import unittest from tfi.as_tensor import as_tensor from functools import partialmethod class AsTensorTest(unittest.TestCase): pass _FIXTURES = [ ('string', 'string', [], str, 'string'), ('list', ['string'], [None], str, ['string']), ('list', ['string'], [1], str, ['string']), ('generator', (s for s in ['string']), [1], str, ['string']), ('emptylist', [], [None], float, []), ('emptylist', [], [0], float, []), ('nested_list', [['string'], ['foo']], [2,1], str, [['string'], ['foo']]), ] for (name, *rest) in _FIXTURES: def do_test(self, expect, shape, dtype, data): result = as_tensor(data, shape, dtype) self.assertEqual(expect, result) setattr(AsTensorTest, 'test_%s' % name, partialmethod(do_test, *rest)) if __name__ == '__main__': unittest.main()
import unittest from as_tensor import as_tensor class AsTensorTest(unittest.TestCase): pass _FIXTURES = [ ('string', 'string', [], 'string'), ('list', ['string'], [None], ['string']), ('list', ['string'], [1], ['string']), ('generator', (s for s in ['string']), [1], ['string']), ('emptylist', [], [None], []), ('emptylist', [], [0], []), ('nested_list', [['string'], ['foo']], [2,1], [['string'], ['foo']]), ] for (name, expect, shape, data) in _FIXTURES: def do_test(self): result = as_tensor(data, shape) self.assertEqual(expect, result) setattr(AsTensorTest, 'test_%s' % name, do_test) if __name__ == '__main__': unittest.main()
mit
Python
55726db079313570fb9889ae91a4664f2e2daa98
add buttons to choose task
mesenev/top_bot_lyceum
methods/homeworks.py
methods/homeworks.py
from enum import Enum, auto from telegram.ext import CommandHandler, MessageHandler, Filters from telegram.ext.conversationhandler import ConversationHandler from telegram.message import Message from telegram.replykeyboardmarkup import ReplyKeyboardMarkup from telegram.update import Update from lyceum_api import get_check_queue from lyceum_api.issue import QueueTask from methods.auth import get_user class State(Enum): not_logged_in = auto() def handle_hw(bot, update: Update): user = get_user(update.message) if not user: update.message.reply_text('Not logged in') return ConversationHandler.END q = [QueueTask(t) for t in get_check_queue(user.sid)] tasks = [['{} -- {}'.format(t.task_title, t.student_name)] for t in q] markup = ReplyKeyboardMarkup(tasks, one_time_keyboard=True) update.message.reply_text('Выберите задание на проверку', reply_markup=markup) return ConversationHandler.END # def on_choose(bot, update): # message: Message = update.message conv_handler = ConversationHandler( entry_points=[CommandHandler('hw', handle_hw, Filters.private)], states={ # States.username: [MessageHandler(Filters.text, # handle_username, # pass_user_data=True)], # States.password: [MessageHandler(Filters.text, # handle_password, # pass_user_data=True)] }, fallbacks=[] )
from enum import Enum, auto from telegram.ext import CommandHandler, MessageHandler, Filters from telegram.ext.conversationhandler import ConversationHandler from telegram.message import Message from telegram.update import Update from lyceum_api import get_check_queue from lyceum_api.issue import QueueTask from methods.auth import get_user class State(Enum): not_logged_in = auto() def handle_hw(bot, update: Update): user = get_user(update.message) if not user: update.message.reply_text('Not logged in') return ConversationHandler.END q = [QueueTask(t) for t in get_check_queue(user.sid)] tasks = ('Задания на проверку:\n' + '\n'.join('{} -- {}'.format(t.task_title, t.student_name) for t in q)) update.message.reply_text(tasks) return ConversationHandler.END # def on_choose(bot, update): # message: Message = update.message conv_handler = ConversationHandler( entry_points=[CommandHandler('hw', handle_hw, Filters.private)], states={ # States.username: [MessageHandler(Filters.text, # handle_username, # pass_user_data=True)], # States.password: [MessageHandler(Filters.text, # handle_password, # pass_user_data=True)] }, fallbacks=[] )
mit
Python
fd61f3cfbcd520b1b5fc9208c553ee946cced517
Remove duplicates from compression levels tests
python-lz4/python-lz4,python-lz4/python-lz4
tests/frame/conftest.py
tests/frame/conftest.py
import pytest # import random import lz4.frame as lz4frame @pytest.fixture( params=[ (lz4frame.BLOCKSIZE_DEFAULT), (lz4frame.BLOCKSIZE_MAX64KB), (lz4frame.BLOCKSIZE_MAX256KB), (lz4frame.BLOCKSIZE_MAX1MB), (lz4frame.BLOCKSIZE_MAX4MB), ] ) def block_size(request): return request.param @pytest.fixture( params=[ (lz4frame.BLOCKMODE_LINKED), (lz4frame.BLOCKMODE_INDEPENDENT), ] ) def block_mode(request): return request.param @pytest.fixture( params=[ (lz4frame.CONTENTCHECKSUM_DISABLED), (lz4frame.CONTENTCHECKSUM_ENABLED), ] ) def content_checksum(request): return request.param compression_levels = list(range(-5, 13)) + [ lz4frame.COMPRESSIONLEVEL_MIN, lz4frame.COMPRESSIONLEVEL_MINHC, lz4frame.COMPRESSIONLEVEL_MAX, ] compression_levels = [ # Although testing with all compression levels is desirable, the number of # tests becomes too large. So, we'll select some compression levels at # random. # (i) for i in random.sample(set(compression_levels), k=2) (i) for i in set(compression_levels) ] @pytest.fixture( params=compression_levels ) def compression_level(request): return request.param @pytest.fixture( params=[ (True), (False) ] ) def auto_flush(request): return request.param @pytest.fixture( params=[ (True), (False) ] ) def store_size(request): return request.param
import pytest # import random import lz4.frame as lz4frame @pytest.fixture( params=[ (lz4frame.BLOCKSIZE_DEFAULT), (lz4frame.BLOCKSIZE_MAX64KB), (lz4frame.BLOCKSIZE_MAX256KB), (lz4frame.BLOCKSIZE_MAX1MB), (lz4frame.BLOCKSIZE_MAX4MB), ] ) def block_size(request): return request.param @pytest.fixture( params=[ (lz4frame.BLOCKMODE_LINKED), (lz4frame.BLOCKMODE_INDEPENDENT), ] ) def block_mode(request): return request.param @pytest.fixture( params=[ (lz4frame.CONTENTCHECKSUM_DISABLED), (lz4frame.CONTENTCHECKSUM_ENABLED), ] ) def content_checksum(request): return request.param compression_levels = list(range(-5, 13)) + [ lz4frame.COMPRESSIONLEVEL_MIN, lz4frame.COMPRESSIONLEVEL_MINHC, lz4frame.COMPRESSIONLEVEL_MAX, ] compression_levels = [ # Although testing with all compression levels is desirable, the number of # tests becomes too large. So, we'll select some compression levels at # random. # (i) for i in random.sample(set(compression_levels), k=2) (i) for i in compression_levels ] @pytest.fixture( params=compression_levels ) def compression_level(request): return request.param @pytest.fixture( params=[ (True), (False) ] ) def auto_flush(request): return request.param @pytest.fixture( params=[ (True), (False) ] ) def store_size(request): return request.param
bsd-3-clause
Python
1c6ed4130baacf0d0f662b6aa056630dd7fd383d
Fix vocab splitting
psmit/kaldi-recipes,psmit/kaldi-recipes,phsmit/kaldi-recipes,psmit/kaldi-recipes,phsmit/kaldi-recipes
spraakbanken/s5/spr_local/make_recog_vocab.py
spraakbanken/s5/spr_local/make_recog_vocab.py
#!/usr/bin/env python3 import sys import collections def main(in_vocab, size, out_vocab,): counter = collections.Counter() size = int(size) for line in open(in_vocab, encoding='utf-8'): word, count = line.rstrip("\n").split(" ") if any(x.isdigit() for x in word): continue punctuation = "\\/?.,!;:\"\'()-=+[]%§*¤ïÐ$&<>#@{}" if any(x in punctuation for x in word): continue counter[word] += int(count) with open(out_vocab, 'w', encoding='utf-8') as out_f: for w, c in counter.most_common(size): print(w, file=out_f) if __name__ == "__main__": if len(sys.argv) != 4: exit("3 arguments: in_vocab, desired_size, out_vocab") main(*sys.argv[1:])
#!/usr/bin/env python3 import sys import collections def main(in_vocab, size, out_vocab,): counter = collections.Counter() size = int(size) for line in open(in_vocab, encoding='utf-8'): word, count = line.strip().split() if any(x.isdigit() for x in word): continue punctuation = "\\/?.,!;:\"\'()-=+[]%§*¤ïÐ$&<>#@{}" if any(x in punctuation for x in word): continue counter[word] += int(count) with open(out_vocab, 'w', encoding='utf-8') as out_f: for w, c in counter.most_common(size): print(w, file=out_f) if __name__ == "__main__": if len(sys.argv) != 4: exit("3 arguments: in_vocab, desired_size, out_vocab") main(*sys.argv[1:])
apache-2.0
Python
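What the splitting fix above guards against: bare strip().split() collapses whitespace and silently drops a vocabulary token that is itself whitespace, breaking the two-field unpack, while rstrip('\n').split(' ') preserves both fields. For example:

line = u'\xa0 7\n'  # the vocabulary "word" is a no-break space
try:
    word, count = line.strip().split()  # yields ['7']: unpack fails
except ValueError as err:
    print(err)
word, count = line.rstrip('\n').split(' ')  # ['\xa0', '7'], as intended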
7be728d551d7d2becd70b575f95facbbd561e69b
Add latest version of libsigsegv (#3449)
iulian787/spack,krafczyk/spack,skosukhin/spack,krafczyk/spack,tmerrick1/spack,krafczyk/spack,EmreAtes/spack,krafczyk/spack,TheTimmy/spack,LLNL/spack,mfherbst/spack,lgarren/spack,EmreAtes/spack,TheTimmy/spack,EmreAtes/spack,matthiasdiener/spack,lgarren/spack,TheTimmy/spack,EmreAtes/spack,matthiasdiener/spack,tmerrick1/spack,LLNL/spack,TheTimmy/spack,LLNL/spack,tmerrick1/spack,matthiasdiener/spack,iulian787/spack,EmreAtes/spack,lgarren/spack,mfherbst/spack,TheTimmy/spack,skosukhin/spack,LLNL/spack,tmerrick1/spack,lgarren/spack,matthiasdiener/spack,mfherbst/spack,skosukhin/spack,lgarren/spack,mfherbst/spack,iulian787/spack,mfherbst/spack,tmerrick1/spack,iulian787/spack,matthiasdiener/spack,skosukhin/spack,krafczyk/spack,iulian787/spack,LLNL/spack,skosukhin/spack
var/spack/repos/builtin/packages/libsigsegv/package.py
var/spack/repos/builtin/packages/libsigsegv/package.py
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Libsigsegv(AutotoolsPackage):
    """GNU libsigsegv is a library for handling page faults in user mode."""

    homepage = "https://www.gnu.org/software/libsigsegv/"
    url = "https://ftp.gnu.org/gnu/libsigsegv/libsigsegv-2.11.tar.gz"

    patch('patch.new_config_guess', when='@2.10')

    version('2.11', 'a812d9481f6097f705599b218eea349f')
    version('2.10', '7f96fb1f65b3b8cbc1582fb7be774f0f')

    def configure_args(self):
        return ['--enable-shared']

##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Libsigsegv(AutotoolsPackage):
    """GNU libsigsegv is a library for handling page faults in user mode."""

    homepage = "https://www.gnu.org/software/libsigsegv/"
    url = "https://ftp.gnu.org/gnu/libsigsegv/libsigsegv-2.10.tar.gz"

    patch('patch.new_config_guess', when='@2.10')

    version('2.10', '7f96fb1f65b3b8cbc1582fb7be774f0f')

    def configure_args(self):
        return ['--enable-shared']
lgpl-2.1
Python
20a89ca326712058f3f22621eed725c0f510bee3
Add branch with bugfix (#8355)
mfherbst/spack,iulian787/spack,mfherbst/spack,krafczyk/spack,tmerrick1/spack,krafczyk/spack,tmerrick1/spack,mfherbst/spack,iulian787/spack,iulian787/spack,matthiasdiener/spack,iulian787/spack,tmerrick1/spack,LLNL/spack,krafczyk/spack,matthiasdiener/spack,LLNL/spack,krafczyk/spack,tmerrick1/spack,mfherbst/spack,matthiasdiener/spack,matthiasdiener/spack,LLNL/spack,matthiasdiener/spack,mfherbst/spack,tmerrick1/spack,krafczyk/spack,iulian787/spack,LLNL/spack,LLNL/spack
var/spack/repos/builtin/packages/meraculous/package.py
var/spack/repos/builtin/packages/meraculous/package.py
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Meraculous(CMakePackage):
    """Meraculous is a whole genome assembler for Next Generation
       Sequencing data geared for large genomes."""

    homepage = "http://jgi.doe.gov/data-and-tools/meraculous/"
    url = "https://downloads.sourceforge.net/project/meraculous20/Meraculous-v2.2.4.tar.gz"

    version('2.2.5.1', git="https://bitbucket.org/berkeleylab/genomics-meraculous2.git",
            branch="release-2.2.5.1")
    version('2.2.4', '349feb6cb178643a46e4b092c87bad3a')

    depends_on('perl', type=('build', 'run'))
    depends_on('[email protected]:')
    depends_on('[email protected]:')
    depends_on('perl-log-log4perl', type=('build', 'run'))

    conflicts('%[email protected]:', when='@2.2.4')

    def patch(self):
        edit = FileFilter('CMakeLists.txt')
        edit.filter("-static-libstdc\+\+", "")

    def setup_environment(self, spack_env, run_env):
        run_env.set('MERACULOUS_ROOT', self.prefix)
        run_env.prepend_path('PERL5LIB', self.prefix.lib)

##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Meraculous(CMakePackage):
    """Meraculous is a whole genome assembler for Next Generation
       Sequencing data geared for large genomes."""

    homepage = "http://jgi.doe.gov/data-and-tools/meraculous/"
    url = "https://downloads.sourceforge.net/project/meraculous20/Meraculous-v2.2.4.tar.gz"

    version('2.2.4', '349feb6cb178643a46e4b092c87bad3a')

    depends_on('perl', type=('build', 'run'))
    depends_on('[email protected]:')
    depends_on('[email protected]:')
    depends_on('perl-log-log4perl', type=('build', 'run'))

    conflicts('%[email protected]:', when='@2.2.4')

    def patch(self):
        edit = FileFilter('CMakeLists.txt')
        edit.filter("-static-libstdc\+\+", "")

    def setup_environment(self, spack_env, run_env):
        run_env.set('MERACULOUS_ROOT', self.prefix)
        run_env.prepend_path('PERL5LIB', self.prefix.lib)
lgpl-2.1
Python
5d608a855132f0a378e44b3c0c7dbba1f4f4dace
fix corehq.messaging.smsbackends.twilio.tests.test_log_call:TwilioLogCallTestCase.test_log_call
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
corehq/messaging/smsbackends/twilio/tests/test_log_call.py
corehq/messaging/smsbackends/twilio/tests/test_log_call.py
from __future__ import absolute_import
from __future__ import unicode_literals
import corehq.apps.ivr.tests.util as util
from corehq.apps.ivr.models import Call
from corehq.messaging.smsbackends.twilio.models import SQLTwilioBackend
from corehq.messaging.smsbackends.twilio.views import IVR_RESPONSE
from django.test import Client


class TwilioLogCallTestCase(util.LogCallTestCase):

    def setUp(self):
        super(TwilioLogCallTestCase, self).setUp()
        self.backend = SQLTwilioBackend.objects.create(
            name='TWILIO',
            is_global=True,
            hq_api_id=SQLTwilioBackend.get_api_id()
        )

    def tearDown(self):
        self.backend.delete()
        super(TwilioLogCallTestCase, self).tearDown()

    def test_401_response(self):
        with self.create_case():
            start_count = Call.by_domain(self.domain).count()
            response = Client().post('/twilio/ivr/xxxxx', {
                'From': self.phone_number,
                'CallSid': 'xyz',
            })
            self.assertEqual(response.status_code, 401)
            end_count = Call.by_domain(self.domain).count()
            self.assertEqual(start_count, end_count)

    def simulate_inbound_call(self, phone_number):
        url = '/twilio/ivr/%s' % self.backend.inbound_api_key
        return Client().post(url, {
            'From': phone_number,
            'CallSid': 'xyz',
        })

    def check_response(self, response):
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.decode('utf-8'), IVR_RESPONSE)

from __future__ import absolute_import
from __future__ import unicode_literals
import corehq.apps.ivr.tests.util as util
from corehq.apps.ivr.models import Call
from corehq.messaging.smsbackends.twilio.models import SQLTwilioBackend
from corehq.messaging.smsbackends.twilio.views import IVR_RESPONSE
from django.test import Client


class TwilioLogCallTestCase(util.LogCallTestCase):

    def setUp(self):
        super(TwilioLogCallTestCase, self).setUp()
        self.backend = SQLTwilioBackend.objects.create(
            name='TWILIO',
            is_global=True,
            hq_api_id=SQLTwilioBackend.get_api_id()
        )

    def tearDown(self):
        self.backend.delete()
        super(TwilioLogCallTestCase, self).tearDown()

    def test_401_response(self):
        with self.create_case():
            start_count = Call.by_domain(self.domain).count()
            response = Client().post('/twilio/ivr/xxxxx', {
                'From': self.phone_number,
                'CallSid': 'xyz',
            })
            self.assertEqual(response.status_code, 401)
            end_count = Call.by_domain(self.domain).count()
            self.assertEqual(start_count, end_count)

    def simulate_inbound_call(self, phone_number):
        url = '/twilio/ivr/%s' % self.backend.inbound_api_key
        return Client().post(url, {
            'From': phone_number,
            'CallSid': 'xyz',
        })

    def check_response(self, response):
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, IVR_RESPONSE)
bsd-3-clause
Python
968f0f3d41a546c4c6614d24be3e077ba1ee37b9
Reorganize xml_utils imports
scieloorg/packtools,scieloorg/packtools,scieloorg/packtools
packtools/sps/utils/xml_utils.py
packtools/sps/utils/xml_utils.py
import logging
import re
from copy import deepcopy

from lxml import etree

from packtools.sps import exceptions
from packtools.sps.utils import file_utils


logger = logging.getLogger(__name__)


class LoadToXMLError(Exception):
    ...


def fix_xml(xml_str):
    return fix_namespace_prefix_w(xml_str)


def fix_namespace_prefix_w(content):
    """
    Convert texts matching the pattern `w:st="` to `w-st="`
    """
    pattern = r"\bw:[a-z]{1,}=\""
    found_items = re.findall(pattern, content)
    logger.debug("Found %i namespace prefix w", len(found_items))
    for item in set(found_items):
        new_namespace = item.replace(":", "-")
        logger.debug("%s -> %s" % (item, new_namespace))
        content = content.replace(item, new_namespace)
    return content


def _get_xml_content(xml):
    if isinstance(xml, str):
        try:
            content = read_file(xml)
        except (FileNotFoundError, OSError):
            content = xml
        content = fix_xml(content)
        return content.encode("utf-8")
    return xml


def get_xml_tree(content):
    parser = etree.XMLParser(remove_blank_text=True, no_network=True)
    try:
        content = _get_xml_content(content)
        xml_tree = etree.XML(content, parser)
        # if isinstance(content, str):
        #     # xml_tree = etree.parse(BytesIO(content.encode("utf-8")), parser)
        #     xml_tree = etree.parse(StringIO(content), parser)
        # else:
        #     # content == zipfile.read(sps_xml_file)
        # except ValueError as exc:
        #     xml_tree = etree.XML(content, parser)
    except etree.XMLSyntaxError as exc:
        raise LoadToXMLError(str(exc)) from None
    else:
        return xml_tree


def tostring(node, doctype=None, pretty_print=False):
    return etree.tostring(
        node,
        doctype=doctype,
        xml_declaration=True,
        method="xml",
        encoding="utf-8",
        pretty_print=pretty_print,
    ).decode("utf-8")


def node_text(node, doctype=None, pretty_print=False):
    items = [node.text or ""]
    for child in node.getchildren():
        items.append(
            etree.tostring(child, encoding="utf-8").decode("utf-8")
        )
    return "".join(items)

import logging
import re

from lxml import etree

from dsm.utils.files import read_file


logger = logging.getLogger(__name__)


class LoadToXMLError(Exception):
    ...


def fix_xml(xml_str):
    return fix_namespace_prefix_w(xml_str)


def fix_namespace_prefix_w(content):
    """
    Convert texts matching the pattern `w:st="` to `w-st="`
    """
    pattern = r"\bw:[a-z]{1,}=\""
    found_items = re.findall(pattern, content)
    logger.debug("Found %i namespace prefix w", len(found_items))
    for item in set(found_items):
        new_namespace = item.replace(":", "-")
        logger.debug("%s -> %s" % (item, new_namespace))
        content = content.replace(item, new_namespace)
    return content


def _get_xml_content(xml):
    if isinstance(xml, str):
        try:
            content = read_file(xml)
        except (FileNotFoundError, OSError):
            content = xml
        content = fix_xml(content)
        return content.encode("utf-8")
    return xml


def get_xml_tree(content):
    parser = etree.XMLParser(remove_blank_text=True, no_network=True)
    try:
        content = _get_xml_content(content)
        xml_tree = etree.XML(content, parser)
        # if isinstance(content, str):
        #     # xml_tree = etree.parse(BytesIO(content.encode("utf-8")), parser)
        #     xml_tree = etree.parse(StringIO(content), parser)
        # else:
        #     # content == zipfile.read(sps_xml_file)
        # except ValueError as exc:
        #     xml_tree = etree.XML(content, parser)
    except etree.XMLSyntaxError as exc:
        raise LoadToXMLError(str(exc)) from None
    else:
        return xml_tree


def tostring(node, doctype=None, pretty_print=False):
    return etree.tostring(
        node,
        doctype=doctype,
        xml_declaration=True,
        method="xml",
        encoding="utf-8",
        pretty_print=pretty_print,
    ).decode("utf-8")


def node_text(node, doctype=None, pretty_print=False):
    items = [node.text or ""]
    for child in node.getchildren():
        items.append(
            etree.tostring(child, encoding="utf-8").decode("utf-8")
        )
    return "".join(items)
bsd-2-clause
Python
713fd67b4aa0d3a614ca149f86deeb2d5e913d12
fix installation on linux (#24706)
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
var/spack/repos/builtin/packages/py-keyring/package.py
var/spack/repos/builtin/packages/py-keyring/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyKeyring(PythonPackage):
    """The Python keyring library provides an easy way to access the
    system keyring service from python. It can be used in any
    application that needs safe password storage."""

    homepage = "https://github.com/jaraco/keyring"
    pypi = "keyring/keyring-23.0.1.tar.gz"

    version('23.0.1', sha256='045703609dd3fccfcdb27da201684278823b72af515aedec1a8515719a038cb8')

    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('[email protected]:+toml', type='build')
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'), when='platform=linux')
    depends_on('[email protected]:', type=('build', 'run'), when='platform=linux')

    # TODO: additional dependency on pywin32-ctypes required for Windows

# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyKeyring(PythonPackage):
    """The Python keyring library provides an easy way to access the
    system keyring service from python. It can be used in any
    application that needs safe password storage."""

    homepage = "https://github.com/jaraco/keyring"
    pypi = "keyring/keyring-23.0.1.tar.gz"

    version('23.0.1', sha256='045703609dd3fccfcdb27da201684278823b72af515aedec1a8515719a038cb8')

    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('[email protected]:+toml', type='build')
    depends_on('[email protected]:', type=('build', 'run'))

    # TODO: additional dependencies required for Windows/Linux
lgpl-2.1
Python
f06a4689fab32d7d2f4c848019978665656a5cdf
Implement hexfile record types used by GNU ld
thotypous/mikroe-uhb
mikroeuhb/hexfile.py
mikroeuhb/hexfile.py
import struct, logging
from binascii import unhexlify
from util import bord

logger = logging.getLogger(__name__)


def load(f, devkit):
    """Load an Intel HEX File from a file object into a devkit.
    The devkit must implement a write(address,data) method."""
    lineno = 0
    base_addr = 0
    for line in f.xreadlines():
        lineno += 1
        line = line.strip()
        if line == '':
            continue
        if bord(line[0]) != ord(':'):
            raise IOError('line %d: malformed' % lineno)
        line = unhexlify(line[1:])
        byte_count, address, record_type = struct.unpack('>BHB', line[:4])
        correct_len = byte_count + 5
        if len(line) != correct_len:
            logger.warn('line %d: should have %d bytes -- truncating'
                        % (lineno, correct_len))
            line = line[:correct_len]
        if sum(map(bord,line)) & 0xFF != 0:
            raise IOError('line %d: incorrect checksum' % lineno)
        data = line[4:-1]
        if record_type == 0x00:  # data record
            devkit.write(base_addr + address, data)
        elif record_type == 0x01:  # end of file record
            break
        elif record_type == 0x04:  # extended linear address record
            if byte_count != 2:
                raise IOError('line %d: extended linear address record must have 2 bytes of data' % lineno)
            base_addr, = struct.unpack('>H', data)
            base_addr <<= 16
        elif record_type == 0x02:  # extended segment address record
            base_addr, = struct.unpack('>H', data)
            base_addr <<= 4
        elif record_type not in [0x03, 0x05]:  # used for the initial PC (ignored)
            raise IOError('line %d: unsupported record type %d'
                          % (lineno, record_type))

import struct, logging
from binascii import unhexlify
from util import bord

logger = logging.getLogger(__name__)


def load(f, devkit):
    """Load an Intel HEX File from a file object into a devkit.
    The devkit must implement a write(address,data) method."""
    lineno = 0
    base_addr = 0
    for line in f.xreadlines():
        lineno += 1
        line = line.strip()
        if line == '':
            continue
        if bord(line[0]) != ord(':'):
            raise IOError('line %d: malformed' % lineno)
        line = unhexlify(line[1:])
        byte_count, address, record_type = struct.unpack('>BHB', line[:4])
        correct_len = byte_count + 5
        if len(line) != correct_len:
            logger.warn('line %d: should have %d bytes -- truncating'
                        % (lineno, correct_len))
            line = line[:correct_len]
        if sum(map(bord,line)) & 0xFF != 0:
            raise IOError('line %d: incorrect checksum' % lineno)
        data = line[4:-1]
        if record_type == 0x00:  # data record
            devkit.write(base_addr + address, data)
        elif record_type == 0x01:  # end of file record
            break
        elif record_type == 0x04:  # extended linear address record
            if byte_count != 2:
                raise IOError('line %d: extended linear address record must have 2 bytes of data' % lineno)
            base_addr, = struct.unpack('>H', data)
            base_addr <<= 16
        else:
            raise IOError('line %d: unsupported record type %d'
                          % (lineno, record_type))
mit
Python
b9b3837937341e6b1b052bbfdd979e3bb57d87c4
Fix SSL security provider integration tests
rafaduran/python-mcollective,rafaduran/python-mcollective,rafaduran/python-mcollective,rafaduran/python-mcollective
tests/integration/test_with_ssl.py
tests/integration/test_with_ssl.py
import os

from pymco.test import ctxt
from . import base

FIXTURES_PATH = os.path.join(ctxt.ROOT, 'fixtures')


class SSLTestCase(base.IntegrationTestCase):
    '''RabbitMQ integration test case.'''
    CTXT = {
        'plugin.activemq.pool.1.port': 61614,
        'plugin.activemq.pool.1.password': 'marionette',
        'plugin.ssl_server_public': 'tests/fixtures/server-public.pem',
        'plugin.ssl_client_private': 'tests/fixtures/client-private.pem',
        'plugin.ssl_client_public': 'tests/fixtures/client-public.pem',
        'plugin.ssl_server_private': os.path.join(FIXTURES_PATH, 'server-private.pem'),
        'securityprovider': 'ssl',
        'plugin.ssl_client_cert_dir': FIXTURES_PATH,
    }


class TestWithSSLMCo20x(base.MCollective20x, SSLTestCase):
    '''MCollective integration test case.'''


class TestWithSSLMCo22x(base.MCollective22x, SSLTestCase):
    '''MCollective integration test case.'''


class TestWithSSLMCo23x(base.MCollective23x, SSLTestCase):
    '''MCollective integration test case.'''

from . import base


class SSLTestCase(base.IntegrationTestCase):
    '''RabbitMQ integration test case.'''
    CTXT = {
        'plugin.activemq.pool.1.port': 61614,
        'plugin.activemq.pool.1.password': 'marionette',
        'plugin.ssl_server_public': 'tests/fixtures/server-public.pem',
        'plugin.ssl_client_private': 'tests/fixtures/client-private.pem',
        'plugin.ssl_client_public': 'tests/fixtures/client-public.pem',
    }


class TestWithSSLMCo20x(base.MCollective20x, SSLTestCase):
    '''MCollective integration test case.'''


class TestWithSSLMCo22x(base.MCollective22x, SSLTestCase):
    '''MCollective integration test case.'''


class TestWithSSLMCo23x(base.MCollective23x, SSLTestCase):
    '''MCollective integration test case.'''
bsd-3-clause
Python
dcc07355786f94da36d938239c5c60d5302e4d42
test for identity link
dandesousa/drf-collection-json
testapp/tests/test_renderer_infer.py
testapp/tests/test_renderer_infer.py
#!/usr/bin/env python
# encoding: utf-8

from django.test import TestCase
from collection_json import Collection
from testapp.models import Person

try:
    from urlparse import urlparse
except ImportError:
    from urllib.parse import urlparse


class DictionaryTest(TestCase):
    """tests when the response contains a dictionary"""
    def test_no_serializer_view(self):
        with self.assertRaises(TypeError):
            self.client.get("/infer/noserializer")


class PersonTest(TestCase):
    def setUp(self):
        self.num_people = 10
        for i in range(self.num_people):
            p = Person.objects.create(name="person{}".format(i), address="address{}".format(i))
            p.save()
        self.url = "/infer/person"
        response = self.client.get(self.url)
        content = response.content.decode("utf-8")
        self.collection = Collection.from_json(content)

    def test_db_setup(self):
        """asserts that the database was properly initialized"""
        self.assertEqual(self.num_people, len(Person.objects.all()))

    def test_collection_items(self):
        """asserts that the right number of items was parsed"""
        self.assertEqual(self.num_people, len(self.collection.items))

    def test_collection_names(self):
        """tests that the given attribute was parsed correctly"""
        for i, item in enumerate(self.collection.items):
            expected = "person{}".format(i)
            self.assertEqual(item.name.value, expected)

    def test_collection_address(self):
        """tests that the given attribute was parsed correctly"""
        for i, item in enumerate(self.collection.items):
            expected = "address{}".format(i)
            self.assertEqual(item.address.value, expected)

    def test_collection_identity_link(self):
        """tests that the href for the collection is correct"""
        actual = urlparse(self.collection.href).path
        self.assertEqual(actual, self.url)


class ListTest(TestCase):
    """tests when the response contains a list"""
    urls = "testapp.urls"

#!/usr/bin/env python
# encoding: utf-8

from django.test import TestCase
from collection_json import Collection
from testapp.models import Person


class DictionaryTest(TestCase):
    """tests when the response contains a dictionary"""
    def test_no_serializer_view(self):
        with self.assertRaises(TypeError):
            self.client.get("/infer/noserializer")


class PersonTest(TestCase):
    def setUp(self):
        self.num_people = 10
        for i in range(self.num_people):
            p = Person.objects.create(name="person{}".format(i), address="address{}".format(i))
            p.save()
        response = self.client.get("/infer/person")
        content = response.content.decode("utf-8")
        self.collection = Collection.from_json(content)

    def test_db_setup(self):
        """asserts that the database was properly initialized"""
        self.assertEqual(self.num_people, len(Person.objects.all()))

    def test_collection_items(self):
        """asserts that the right number of items was parsed"""
        self.assertEqual(self.num_people, len(self.collection.items))

    def test_collection_names(self):
        """tests that the given attribute was parsed correctly"""
        for i, item in enumerate(self.collection.items):
            expected = "person{}".format(i)
            self.assertEqual(item.name.value, expected)

    def test_collection_address(self):
        """tests that the given attribute was parsed correctly"""
        for i, item in enumerate(self.collection.items):
            expected = "address{}".format(i)
            self.assertEqual(item.address.value, expected)


class ListTest(TestCase):
    """tests when the response contains a list"""
    urls = "testapp.urls"
cc0-1.0
Python
943ecc39af2b152bc8d5fed55bdafe5332a33d75
remove xfail (#4458)
kubeflow/kubeflow,kubeflow/kubeflow,kubeflow/kubeflow,kubeflow/kubeflow,kubeflow/kubeflow,kubeflow/kubeflow
testing/kfctl/endpoint_ready_test.py
testing/kfctl/endpoint_ready_test.py
import datetime
import json
import logging
import os
import subprocess
import tempfile
import uuid
from retrying import retry

import pytest

from kubeflow.testing import util
from testing import deploy_utils
from testing import gcp_util


# There's really no good reason to run test_endpoint during presubmits.
# We shouldn't need it to feel confident that kfctl is working.
@pytest.mark.skipif(os.getenv("JOB_TYPE") == "presubmit",
                    reason="test endpoint doesn't run in presubmits")
def test_endpoint_is_ready(record_xml_attribute, project, app_path, app_name,
                           use_basic_auth):
  """Test that Kubeflow was successfully deployed.

  Args:
    project: The gcp project that we deployed kubeflow
    app_name: The name of the kubeflow deployment
  """
  util.set_pytest_junit(record_xml_attribute, "test_endpoint_is_ready")

  url = "https://{}.endpoints.{}.cloud.goog".format(app_name, project)
  if use_basic_auth:
    with open(os.path.join(app_path, "login.json"), "r") as f:
      login = json.load(f)
      # Let it fail if login info cannot be found.
      username = login["KUBEFLOW_USERNAME"]
      password = login["KUBEFLOW_PASSWORD"]
    if not gcp_util.basic_auth_is_ready(url, username, password):
      raise Exception("Basic auth endpoint is not ready")
  else:
    # Owned by project kubeflow-ci-deployment.
    os.environ["CLIENT_ID"] = "29647740582-7meo6c7a9a76jvg54j0g2lv8lrsb4l8g.apps.googleusercontent.com"
    if not gcp_util.iap_is_ready(url):
      raise Exception("IAP endpoint is not ready")


if __name__ == "__main__":
  logging.basicConfig(level=logging.INFO,
                      format=('%(levelname)s|%(asctime)s'
                              '|%(pathname)s|%(lineno)d| %(message)s'),
                      datefmt='%Y-%m-%dT%H:%M:%S',
                      )
  logging.getLogger().setLevel(logging.INFO)
  pytest.main()

import datetime
import json
import logging
import os
import subprocess
import tempfile
import uuid
from retrying import retry

import pytest

from kubeflow.testing import util
from testing import deploy_utils
from testing import gcp_util


# TODO(https://github.com/kubeflow/kfctl/issues/42):
# Test is failing pretty consistently.
@pytest.mark.xfail
# There's really no good reason to run test_endpoint during presubmits.
# We shouldn't need it to feel confident that kfctl is working.
@pytest.mark.skipif(os.getenv("JOB_TYPE") == "presubmit",
                    reason="test endpoint doesn't run in presubmits")
def test_endpoint_is_ready(record_xml_attribute, project, app_path, app_name,
                           use_basic_auth):
  """Test that Kubeflow was successfully deployed.

  Args:
    project: The gcp project that we deployed kubeflow
    app_name: The name of the kubeflow deployment
  """
  util.set_pytest_junit(record_xml_attribute, "test_endpoint_is_ready")

  url = "https://{}.endpoints.{}.cloud.goog".format(app_name, project)
  if use_basic_auth:
    with open(os.path.join(app_path, "login.json"), "r") as f:
      login = json.load(f)
      # Let it fail if login info cannot be found.
      username = login["KUBEFLOW_USERNAME"]
      password = login["KUBEFLOW_PASSWORD"]
    if not gcp_util.basic_auth_is_ready(url, username, password):
      raise Exception("Basic auth endpoint is not ready")
  else:
    # Owned by project kubeflow-ci-deployment.
    os.environ["CLIENT_ID"] = "29647740582-7meo6c7a9a76jvg54j0g2lv8lrsb4l8g.apps.googleusercontent.com"
    if not gcp_util.iap_is_ready(url):
      raise Exception("IAP endpoint is not ready")


if __name__ == "__main__":
  logging.basicConfig(level=logging.INFO,
                      format=('%(levelname)s|%(asctime)s'
                              '|%(pathname)s|%(lineno)d| %(message)s'),
                      datefmt='%Y-%m-%dT%H:%M:%S',
                      )
  logging.getLogger().setLevel(logging.INFO)
  pytest.main()
apache-2.0
Python
291681041f434a981a54371bb7f9f1fa9637afb7
improve comment collapse
dresl/django_choice_and_question,dresl/django_choice_and_question
polls/admin.py
polls/admin.py
from django.contrib import admin
from polls.models import Choice, Question


class ChoiceInline(admin.TabularInline):
    model = Choice
    extra = 3


class QuestionAdmin(admin.ModelAdmin):
    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('Date information', {'fields': ['pub_date'],
                              #'classes': ['collapse']
                              }),
    ]
    inlines = [ChoiceInline]
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date','question_text']
    search_fields = ['question_text']

admin.site.register(Question, QuestionAdmin)

from django.contrib import admin
from polls.models import Choice, Question


class ChoiceInline(admin.TabularInline):
    model = Choice
    extra = 3


class QuestionAdmin(admin.ModelAdmin):
    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('Date information', {'fields': ['pub_date'],
                              #'classes': ['collapse']}),
    ]
    inlines = [ChoiceInline]
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date','question_text']
    search_fields = ['question_text']

admin.site.register(Question, QuestionAdmin)
apache-2.0
Python
05e496de4f6ebbb9e77c6cb1796cc1050a41a181
Adjust whitespace for pep8
tswicegood/wsgi-pratchett
pratchett/__init__.py
pratchett/__init__.py
HEADER = ("X-Clacks-Overhead", "GNU Terry Pratchett") class GNUTerryPratchett(object): def __init__(self, app): self.app = app def __call__(self, environ, start_response): def clacker(status, headers, *args, **kwargs): if HEADER not in headers: headers.append(HEADER) return start_response(status, headers, *args, **kwargs) return self.app(environ, clacker) def make_filter(global_conf): return GNUTerryPratchett
HEADER = ("X-Clacks-Overhead", "GNU Terry Pratchett") class GNUTerryPratchett(object): def __init__(self, app): self.app = app def __call__(self, environ, start_response): def clacker(status, headers, *args, **kwargs): if HEADER not in headers: headers.append(HEADER) return start_response(status, headers, *args, **kwargs) return self.app(environ, clacker) def make_filter(global_conf): return GNUTerryPratchett
apache-2.0
Python
ddb4ed6701808ed5c4e928d042b84e0c84490e58
Bump version 0.0.4
gengo/memsource-wrap,gengo/memsource-wrap
memsource/__init__.py
memsource/__init__.py
__author__ = 'Gengo'
__version__ = '0.0.4'
__license__ = 'MIT'

__author__ = 'Gengo'
__version__ = '0.0.3'
__license__ = 'MIT'
mit
Python
949c7b55e295b4d87f2d7a1bb98242cb055129d1
Solve No.140 in Python with problems
jonathanxqs/lintcode,jonathanxqs/lintcode
140.py
140.py
class Solution:
    """
    @param a, b, n: 32bit integers
    @return: An integer
    """
    def fastPower(self, a, b, n):
        ans = 1
        while b > 0:
            if b % 2==1:
                ans = ans * a % n
            a = a * a % n
            b = b / 2
        return ans % n

# WA because of lintcode
# AC

class Solution:
    """
    @param a, b, n: 32bit integers
    @return: An integer
    """
    def fastPower(self, a, b, n):
        ans = 1
        while b > 0:
            if b % 2==1:
                ans = ans * a % n
            a = a * a % n
            b = b / 2
        return ans % n

# WA
mit
Python
d10bb3695ee93ffd8b91d4d82adaf484de9e9bf1
Rename NeuronNetwork to NeuralNetwork
tysonzero/py-ann
ANN.py
ANN.py
from random import uniform


class Neuron:
    def __init__(self, parents=[]):
        self.parents = [{
            'neuron': parent,
            'weight': uniform(-1, 1),
            'slope': uniform(-1, 1),
        } for parent in parents]

    def calculate(self, increment=0):
        self.output = sum([parent['neuron'].output * (parent['weight'] + increment * parent['slope']) for parent in self.parents]) > 0

    def mutate(self, increment):
        for parent in self.parents:
            parent['weight'] += increment * parent['slope']
            parent['slope'] = uniform(-1, 1)


class NeuralNetwork:
    def __init__(self, inputs, outputs, hidden, rows):
        self.bias = Neuron()
        self.neurons = []
        for row in xrange(rows):
            self.neurons.append([])
            if row == 0:
                for input_ in xrange(inputs):
                    self.neurons[row].append(Neuron(parents=[]))
            elif row == rows - 1:
                for output in xrange(outputs):
                    self.neurons[row].append(Neuron(parents=self.neurons[row - 1] + [self.bias]))
            else:
                for column in xrange(hidden):
                    self.neurons[row].append(Neuron(parents=self.neurons[row - 1] + [self.bias]))
        self.bias.output = True

    def calculate(self, inputs, increment=0):
        for i, neuron_row in enumerate(self.neurons):
            for j, neuron in enumerate(neuron_row):
                if i == 0:
                    neuron.output = inputs[j]
                else:
                    neuron.calculate(increment=increment)
        return [neuron.output for neuron in self.neurons[-1]]

    def mutate(self, increment):
        for neuron_row in self.neurons:
            for neuron in neuron_row:
                neuron.mutate(increment=increment)

from random import uniform


class Neuron:
    def __init__(self, parents=[]):
        self.parents = [{
            'neuron': parent,
            'weight': uniform(-1, 1),
            'slope': uniform(-1, 1),
        } for parent in parents]

    def calculate(self, increment=0):
        self.output = sum([parent['neuron'].output * (parent['weight'] + increment * parent['slope']) for parent in self.parents]) > 0

    def mutate(self, increment):
        for parent in self.parents:
            parent['weight'] += increment * parent['slope']
            parent['slope'] = uniform(-1, 1)


class NeuronNetwork:
    def __init__(self, inputs, outputs, hidden, rows):
        self.bias = Neuron()
        self.neurons = []
        for row in xrange(rows):
            self.neurons.append([])
            if row == 0:
                for input_ in xrange(inputs):
                    self.neurons[row].append(Neuron(parents=[]))
            elif row == rows - 1:
                for output in xrange(outputs):
                    self.neurons[row].append(Neuron(parents=self.neurons[row - 1] + [self.bias]))
            else:
                for column in xrange(hidden):
                    self.neurons[row].append(Neuron(parents=self.neurons[row - 1] + [self.bias]))
        self.bias.output = True

    def calculate(self, inputs, increment=0):
        for i, neuron_row in enumerate(self.neurons):
            for j, neuron in enumerate(neuron_row):
                if i == 0:
                    neuron.output = inputs[j]
                else:
                    neuron.calculate(increment=increment)
        return [neuron.output for neuron in self.neurons[-1]]

    def mutate(self, increment):
        for neuron_row in self.neurons:
            for neuron in neuron_row:
                neuron.mutate(increment=increment)
mit
Python
4f219c4a05a251d9958543d24891955d640bc07f
Add more realistic responses for audit logs in tests.
fulcrumapp/fulcrum-python
tests/test_audit_log.py
tests/test_audit_log.py
import httpretty

from fulcrum.exceptions import NotFoundException, InternalServerErrorException
from tests import FulcrumTestCase
from tests.valid_objects import form as valid_form


class AuditLogTest(FulcrumTestCase):
    @httpretty.activate
    def test_all(self):
        httpretty.register_uri(httpretty.GET, self.api_root + '/audit_logs',
                               body='{"audit_logs":[{"source_type":"authorization","source_id":"ec4a410f-0b76-4a65-ba58-b97eed023351","action":"create","description":"Levar Burton created API token Fulcrum Query Utility","data":{"note":"Fulcrum Query Utility","token_last_8":"f816b890","user_id":"reading-rainbow","user":"Levar Burton"},"ip":"1.1.1.1","user_agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36","location":"Austin, Texas, United States","latitude":30.3085,"longitude":-97.6849,"admin_area":"TX","country":"US","locality":"Austin","postal_code":"78723","id":"def-456","created_at":"2019-01-16T15:14:58Z","updated_at":"2019-01-16T15:14:58Z","actor":"Levar Burton","actor_id":"8a11c2b4-79fc-4503-85e4-056671c41e6f","time":"2019-01-16T15:14:58Z"},{"source_type":"choice_list","source_id":"1c0b0ea3-66cd-4b69-9fe7-20a9e9f07556","action":"create","description":"Levar Burton created choice list New Choice List","data":null,"ip":"1.1.1.1","user_agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36","location":"Tampa, Florida, United States","latitude":27.9987,"longitude":-82.5156,"admin_area":"FL","country":"US","locality":"Tampa","postal_code":"33614","id":"ghi-789","created_at":"2019-01-22T16:11:15Z","updated_at":"2019-01-22T16:11:15Z","actor":"Levar Burton","actor_id":"094ed10f-cd99-4a58-9b4b-65ab5b31b791","time":"2019-01-22T16:11:15Z"}],"current_page":1,"total_pages":30,"total_count":60,"per_page":2}',
                               status=200)
        audit_logs = self.fulcrum_api.audit_logs.search()
        self.assertIsInstance(audit_logs, dict)
        self.assertEqual(len(audit_logs['audit_logs']), 2)
        self.assertEqual(audit_logs['audit_logs'][0]['id'], 'def-456')
        self.assertEqual(audit_logs['audit_logs'][1]['id'], 'ghi-789')

    @httpretty.activate
    def test_find(self):
        httpretty.register_uri(httpretty.GET, self.api_root + '/audit_logs/abc-123',
                               body='{"audit_log":{"source_type":"form","source_id":"zxy-987","action":"update","description":"Jason Sanford updated app GeoBooze - Changed:[Section:actions - YesNoField:post_to_slack];[RecordLinkField:beer_type];","data":null,"ip":"1.1.1.1","user_agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36","location":"Ashburn, Virginia, United States","latitude":39.0481,"longitude":-77.4728,"admin_area":"VA","country":"US","locality":"Ashburn","postal_code":"20149","id":"abc-123","created_at":"2019-01-10T17:29:16Z","updated_at":"2019-01-10T17:29:16Z","actor":"George Costanza","actor_id":"abc123","time":"2019-01-10T17:29:16Z"}}',
                               status=200)
        audit_log = self.fulcrum_api.audit_logs.find('abc-123')
        self.assertIsInstance(audit_log, dict)
        self.assertEqual(audit_log['audit_log']['id'], 'abc-123')

import httpretty

from fulcrum.exceptions import NotFoundException, InternalServerErrorException
from tests import FulcrumTestCase
from tests.valid_objects import form as valid_form


class AuditLogTest(FulcrumTestCase):
    @httpretty.activate
    def test_all(self):
        httpretty.register_uri(httpretty.GET, self.api_root + '/audit_logs',
                               body='{"audit_logs": [{"id": 1},{"id": 2}], "total_count": 2, "current_page": 1, "total_pages": 1, "per_page": 20000}',
                               status=200)
        audit_logs = self.fulcrum_api.audit_logs.search()
        self.assertIsInstance(audit_logs, dict)
        self.assertEqual(len(audit_logs['audit_logs']), 2)

    @httpretty.activate
    def test_find(self):
        httpretty.register_uri(httpretty.GET, self.api_root + '/audit_logs/5b656cd8-f3ef-43e9-8d22-84d015052778',
                               body='{"record_count": 4, "description": "Food Carts and Trucks in Denver", "id": "5b656cd8-f3ef-43e9-8d22-84d015052778"}',
                               status=200)
        audit_log = self.fulcrum_api.audit_logs.find('5b656cd8-f3ef-43e9-8d22-84d015052778')
        self.assertIsInstance(audit_log, dict)
        self.assertEqual(audit_log['id'], '5b656cd8-f3ef-43e9-8d22-84d015052778')

    @httpretty.activate
    def test_find_not_found(self):
        httpretty.register_uri(httpretty.GET, self.api_root + '/audit_logs/lobster', status=404)
        try:
            self.fulcrum_api.audit_logs.find('lobster')
        except Exception as exc:
            self.assertIsInstance(exc, NotFoundException)
apache-2.0
Python
f2cc74d79abf42c0f199c48ef9110bce6cec45b4
Update alcatel_sros_ssh.py
jumpojoy/netmiko,isidroamv/netmiko,ktbyers/netmiko,fooelisa/netmiko,rdezavalia/netmiko,rdezavalia/netmiko,ktbyers/netmiko,shamanu4/netmiko,shamanu4/netmiko,fooelisa/netmiko,isidroamv/netmiko,jumpojoy/netmiko,shsingh/netmiko,shsingh/netmiko
netmiko/alcatel/alcatel_sros_ssh.py
netmiko/alcatel/alcatel_sros_ssh.py
'''
Alcatel-Lucent SROS support
'''
from netmiko.ssh_connection import SSHConnection


class AlcatelSrosSSH(SSHConnection):
    '''
    SROS support
    '''
    def session_preparation(self):
        self.disable_paging(command="environment no more\n")

    def enable(self):
        pass

'''
Alcatel-Lucent SROS support
'''
from netmiko.ssh_connection import SSHConnection


class AlcatelSrosSSH(SSHConnection):
    '''
    SROS support
    '''
    def session_preparation(self):
        self.disable_paging(command="\environment no more\n")

    def enable(self):
        pass
mit
Python
91ff2ed96dc3ba197f71be935ac23796d40ef5dc
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/812a20bfa97f7b56eb3340c2f75358db58483974.
Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,frreiss/tensorflow-fred,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,karllessard/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,yongtang/tensorflow
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "812a20bfa97f7b56eb3340c2f75358db58483974" TFRT_SHA256 = "8235d34c674a842fb08f5fc7f7b6136a1af1dbb20a2ec7213dd99848b884878f" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], )
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "c442a283246c2060d139d4cadb0f8ff59ee7e7da" TFRT_SHA256 = "649107aabf7a242678448c44d4a51d5355904222de7d454a376ad511c803cf0f" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], )
apache-2.0
Python
49ad9b8162a9113f3c4c69818553de2cb6bf66df
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/da541333433f74881d8f44947369756d40d5e7fe.
tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,karllessard/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,Intel-Corporation/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "da541333433f74881d8f44947369756d40d5e7fe" TFRT_SHA256 = "df492c902908141405e88af81c4bb72580e3a5615bd91448b7c44a2c0d29009a" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "bdb99de6e7e5fcd5a7e55895bb1c658ea0336136" TFRT_SHA256 = "a251c274cf0bbd805e221677cf4988c27156af54655b906eab11d9e3ee37d0b5" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
apache-2.0
Python
be0e9cf9a195f44a033bb8b3aeb13febf3cea9cf
Remove check in token credential (#14134)
yugangw-msft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli
src/azure-cli/azure/cli/command_modules/storage/oauth_token_util.py
src/azure-cli/azure/cli/command_modules/storage/oauth_token_util.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

import threading

from knack.log import get_logger

logger = get_logger(__name__)


class TokenUpdater(object):
    """
    This class updates a given token_credential periodically using the provided callback function.
    It shows one way of making sure the credential does not become expired.
    """
    def __init__(self, token_credential, cli_ctx):
        self.token_credential = token_credential
        self.cli_ctx = cli_ctx
        # the timer needs to be protected, as later on it is possible that one thread is setting a new timer and
        # another thread is trying to cancel the timer
        self.lock = threading.Lock()
        self.timer_callback()

    def timer_callback(self):
        # call to get a new token and set a timer
        from azure.cli.core._profile import Profile
        from datetime import datetime
        # should give back token that is valid for at least 5 mins
        token = Profile(cli_ctx=self.cli_ctx).get_raw_token(
            resource="https://storage.azure.com",
            subscription=self.cli_ctx.data['subscription_id'])[0][2]
        try:
            self.token_credential.token = token['accessToken']
            expire = token['expiresOn']
            seconds_left = (datetime.strptime(expire, "%Y-%m-%d %H:%M:%S.%f") - datetime.now()).seconds
        except KeyError:
            # needed to deal with differing unserialized MSI token payload
            self.token_credential.token = token['access_token']
            expire = datetime.fromtimestamp(int(token['expires_on']))
            seconds_left = (expire - datetime.now()).seconds

        if seconds_left < 180:
            logger.warning("Acquired token will expire on %s. Current time is %s.", expire, datetime.now())

        with self.lock:
            self.timer = threading.Timer(seconds_left - 180, self.timer_callback)
            self.timer.daemon = True
            self.timer.start()

    def cancel(self):
        # the timer needs to be canceled once the command has finished executing
        # if not the timer will keep going
        with self.lock:
            self.timer.cancel()

# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

import threading


class TokenUpdater(object):
    """
    This class updates a given token_credential periodically using the provided callback function.
    It shows one way of making sure the credential does not become expired.
    """
    def __init__(self, token_credential, cli_ctx):
        self.token_credential = token_credential
        self.cli_ctx = cli_ctx
        # the timer needs to be protected, as later on it is possible that one thread is setting a new timer and
        # another thread is trying to cancel the timer
        self.lock = threading.Lock()
        self.timer_callback()

    def timer_callback(self):
        # call to get a new token and set a timer
        from azure.cli.core._profile import Profile
        from datetime import datetime
        # should give back token that is valid for at least 5 mins
        token = Profile(cli_ctx=self.cli_ctx).get_raw_token(
            resource="https://storage.azure.com",
            subscription=self.cli_ctx.data['subscription_id'])[0][2]
        try:
            self.token_credential.token = token['accessToken']
            seconds_left = (datetime.strptime(token['expiresOn'], "%Y-%m-%d %H:%M:%S.%f") - datetime.now()).seconds
        except KeyError:
            # needed to deal with differing unserialized MSI token payload
            self.token_credential.token = token['access_token']
            seconds_left = (datetime.fromtimestamp(int(token['expires_on'])) - datetime.now()).seconds

        if seconds_left < 180:
            # acquired token expires in less than 3 mins
            raise Exception("Acquired a token expiring in less than 3 minutes")

        with self.lock:
            self.timer = threading.Timer(seconds_left - 180, self.timer_callback)
            self.timer.daemon = True
            self.timer.start()

    def cancel(self):
        # the timer needs to be canceled once the command has finished executing
        # if not the timer will keep going
        with self.lock:
            self.timer.cancel()
mit
Python
a8a0e24d9ee90676601a52c564eadb7ff264d5cd
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/356740e3a2bf884abd27b2ca362fe8108a7cd257.
karllessard/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,sarvex/tensorflow,yongtang/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,sarvex/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,paolodedios/tensorflow,sarvex/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,karllessard/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,sarvex/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "356740e3a2bf884abd27b2ca362fe8108a7cd257" TFRT_SHA256 = "c5c806b5f5acb345eca8db4bc49053df60d0b368193f5b78346cf6acdc4bc3e8" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], )
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "b570a1921c9e55ac53c8972bd2bfd37cd0eb510d" TFRT_SHA256 = "01295fc2a90aa2d665890adbe8701e2ae2372028d3b8266cba38ceddccb42af6" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], )
apache-2.0
Python
ce977d24d49b7e03b6db5b5590e8fc0ddf8e9127
fix the deploy order in the daemon. closes #862
nprapps/elections14,nprapps/elections14,nprapps/elections14,nprapps/elections14
fabfile/daemons.py
fabfile/daemons.py
#!/usr/bin/env python

from time import sleep, time

from fabric.api import execute, task, env

import app_config

import sys
import traceback

def safe_execute(*args, **kwargs):
    """
    Wrap execute() so that all exceptions are caught and logged.
    """
    try:
        execute(*args, **kwargs)
    except:
        print "ERROR [timestamp: %d]: Here's the traceback" % time()
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        del tb

@task
def deploy():
    """
    Harvest data and deploy slides indefinitely
    """
    while True:
        start = time()
        safe_execute('ap.update')
        safe_execute('data.load_updates', 'data/update.json')
        safe_execute('liveblog.update')
        safe_execute('deploy_bop')
        safe_execute('deploy_big_boards')
        safe_execute('deploy_slides')
        duration = int(time() - start)
        wait = app_config.DEPLOY_INTERVAL - duration

        print "== Deploying slides ran in %ds, waiting %ds ==" % (duration, wait)

        if wait < 0:
            print "WARN: Deploying slides took %d seconds longer than %d" % (abs(wait), app_config.DEPLOY_INTERVAL)
            wait = 0

        sleep(wait)
#!/usr/bin/env python

from time import sleep, time

from fabric.api import execute, task, env

import app_config

import sys
import traceback

def safe_execute(*args, **kwargs):
    """
    Wrap execute() so that all exceptions are caught and logged.
    """
    try:
        execute(*args, **kwargs)
    except:
        print "ERROR [timestamp: %d]: Here's the traceback" % time()
        ex_type, ex, tb = sys.exc_info()
        traceback.print_tb(tb)
        del tb

@task
def deploy():
    """
    Harvest data and deploy slides indefinitely
    """
    while True:
        start = time()
        safe_execute('ap.update')
        safe_execute('data.load_updates', 'data/update.json')
        safe_execute('liveblog.update')
        safe_execute('deploy_slides')
        safe_execute('deploy_big_boards')
        safe_execute('deploy_bop')
        duration = int(time() - start)
        wait = app_config.DEPLOY_INTERVAL - duration

        print "== Deploying slides ran in %ds, waiting %ds ==" % (duration, wait)

        if wait < 0:
            print "WARN: Deploying slides took %d seconds longer than %d" % (abs(wait), app_config.DEPLOY_INTERVAL)
            wait = 0

        sleep(wait)
mit
Python
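For readers skimming the Python 2 fabfile in the record above, the load-bearing piece is the safe_execute wrapper, which keeps one failing step from killing the deploy loop. Below is a minimal Python 3 sketch of the same catch-and-log pattern; it is illustrative only, not part of the dataset record, and the logging setup is an assumption.

import logging
import time
import traceback

logging.basicConfig(level=logging.INFO)

def safe_execute(fn, *args, **kwargs):
    # Run one deploy step; on any failure, log the traceback with a
    # timestamp and keep the surrounding loop alive.
    try:
        fn(*args, **kwargs)
    except Exception:
        logging.error("ERROR [timestamp: %d]:\n%s",
                      time.time(), traceback.format_exc())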
7e95e0b8adb4315c8f8a0c5aa8c6ccc588cbee18
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/0d8bae2de531db2e4e4efd3a4e168b39795458b9.
tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,paolodedios/tensorflow
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "0d8bae2de531db2e4e4efd3a4e168b39795458b9" TFRT_SHA256 = "fa7cd1e72eec99562bf916e071222df2e72e90c67dcb14137ffbef07a4fcac5f" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "4b2fe81ea82e4c33783b5b62973fbe84dbc6f484" TFRT_SHA256 = "f0e6e0fd3e5245d993cd4146d8245e130e724d0070401a25f730b02c7296d1c4" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
apache-2.0
Python
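The record above differs from the other TFRT bumps in this dump mainly in how the download URLs are produced: a tf_mirror_urls helper replaces the hand-written mirror/upstream pair. The helper's real definition lives in third_party/repo.bzl, which is not included here; the sketch below is a plausible reconstruction inferred from the explicit URL pairs visible in neighboring records, not the actual implementation.

def tf_mirror_urls(url):
    # Hypothetical reconstruction: put the TensorFlow mirror first so
    # downloads prefer it, with the upstream URL as the fallback.
    if not url.startswith("https://"):
        return [url]
    return ["http://mirror.tensorflow.org/" + url[len("https://"):], url]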
e2d066811a5e943600c170aba0cf797c104d1588
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/5f6e52142a3592d0cfa058dbfd140cad49ed451a.
google/tsl,google/tsl,google/tsl
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "5f6e52142a3592d0cfa058dbfd140cad49ed451a" TFRT_SHA256 = "8e1efbd7df0fdeb5186b178d7c8b90c33ba80cef54999e988097bd1ff0f4e8fe" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "736eeebfb56c6d0de138f4a29286140d8c26d927" TFRT_SHA256 = "b584ee5ce5ecaadf289b0997987dfb5eec6cf3623f30b83028923cad20914e61" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
apache-2.0
Python
a70e0abdf409d770ddbb9faf3cc66c26fc03b076
fix fbprophet tests following new pystan version
Kaggle/docker-python,Kaggle/docker-python
tests/test_fbprophet.py
tests/test_fbprophet.py
import unittest

import numpy as np
import pandas as pd
from fbprophet import Prophet


class TestFbProphet(unittest.TestCase):
    def test_fit(self):
        train = pd.DataFrame({
            'ds': np.array(['2012-05-18', '2012-05-20']),
            'y': np.array([38.23, 21.25])
        })

        forecaster = Prophet(mcmc_samples=1)
        forecaster.fit(train, control={'adapt_engaged': False})
import unittest

import numpy as np
import pandas as pd
from fbprophet import Prophet


class TestFbProphet(unittest.TestCase):
    def test_fit(self):
        train = pd.DataFrame({
            'ds': np.array(['2012-05-18', '2012-05-20']),
            'y': np.array([38.23, 21.25])
        })

        forecaster = Prophet(mcmc_samples=1)
        forecaster.fit(train)
apache-2.0
Python
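Context for the fbprophet fix above: with mcmc_samples set, Prophet.fit forwards extra keyword arguments to the underlying PyStan sampling call, and control is PyStan's dict of sampler settings. A hedged sketch of what the fixed test exercises one layer down; the function name and both parameters are placeholders, not names from the record.

def smoke_sample(stan_model, stan_data):
    # stan_model: a compiled pystan.StanModel; stan_data: its data dict.
    # control={'adapt_engaged': False} disables warmup adaptation, which
    # is what lets a 1-sample MCMC smoke test run under newer PyStan.
    return stan_model.sampling(data=stan_data,
                               control={'adapt_engaged': False})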
7461c7b6b729c38194ebb5e88b33e7bcc73b4c9c
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/53604b1779bdbea70bed75fe1695b503e06be323.
tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "53604b1779bdbea70bed75fe1695b503e06be323" TFRT_SHA256 = "b2ce14585f2707ec56b013323fde0ff10ddecdf608854dcf332c46244e0dbd20" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "1c915c952cea8e5d290d241b3a0178856a9ec35b" TFRT_SHA256 = "97f8ad0010b924f8489ca04e8e5aa5aea4a69013293e6575137176a6a8d80168" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
apache-2.0
Python
0fe76a38aff965aca9f672b48ed4a4933ee10161
add an argument taskid to EventLoopProgressReportWriter.write()
TaiSakuma/AlphaTwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl,TaiSakuma/AlphaTwirl
AlphaTwirl/EventReader/EventLoopProgressReportWriter.py
AlphaTwirl/EventReader/EventLoopProgressReportWriter.py
# Tai Sakuma <[email protected]>
from AlphaTwirl.ProgressBar import ProgressReport

##____________________________________________________________________________||
class EventLoopProgressReportWriter(object):
    def write(self, taskid, component, event):
        return ProgressReport(
            name = component.name,
            done = event.iEvent + 1,
            total = event.nEvents,
            taskid = taskid
        )

##____________________________________________________________________________||
# Tai Sakuma <[email protected]>
from AlphaTwirl.ProgressBar import ProgressReport

##____________________________________________________________________________||
class EventLoopProgressReportWriter(object):
    def write(self, component, event):
        return ProgressReport(name = component.name, done = event.iEvent + 1, total = event.nEvents)

##____________________________________________________________________________||
bsd-3-clause
Python
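The signature change in the record above is easiest to read from the call site. A minimal sketch with stand-in objects shaped like the attributes the writer reads; the import path is an assumption that simply mirrors the file location given in the record, and the names and numbers are invented.

from types import SimpleNamespace

from AlphaTwirl.EventReader.EventLoopProgressReportWriter import EventLoopProgressReportWriter

writer = EventLoopProgressReportWriter()
component = SimpleNamespace(name="TTJets")       # placeholder component
event = SimpleNamespace(iEvent=41, nEvents=100)  # placeholder event
report = writer.write(42, component, event)      # taskid now leads the arguments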
662cc443f7c32182aaef89e5b61e90797b7e3e58
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/81d27bd006f86cc3fd3d78a7193583ab9d18367a.
tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,yongtang/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,Intel-Corporation/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "81d27bd006f86cc3fd3d78a7193583ab9d18367a" TFRT_SHA256 = "f7cafc8d2b512ff3be61dc5a3d8a3a5bcc3e749b213c1afa4909116b90710e2e" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], )
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "bd4c5dc54997aaffe6f37a802b106c3ac88f150f" TFRT_SHA256 = "a3ee3c259c5d7ea631177a75195b35bbfb695d69ad70adf4b0830ee2d91a9625" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], )
apache-2.0
Python
f0d76cae236cded0bfa6cc0f6486efb04daeb133
convert latency to int before posting to cbmonitor
couchbase/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,couchbase/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner
cbagent/collectors/secondary_latency.py
cbagent/collectors/secondary_latency.py
import os.path

from cbagent.collectors import Collector


class SecondaryLatencyStats(Collector):

    COLLECTOR = "secondaryscan_latency"

    def _get_secondaryscan_latency(self):
        stats = {}
        if os.path.isfile(self.secondary_statsfile):
            with open(self.secondary_statsfile, 'rb') as fh:
                next(fh).decode()
                fh.seek(-400, 2)
                last = fh.readlines()[-1].decode()
                duration = last.split(',')[-1]
                stats = {}
                latency = duration.split(':')[1]
                latency = latency.rstrip()
                latency_key = duration.split(':')[0]
                latency_key = latency_key.strip()
                stats[latency_key] = int(latency)
        return stats

    def sample(self):
        stats = self._get_secondaryscan_latency()
        if stats:
            self.update_metric_metadata(stats.keys())
            self.store.append(stats,
                              cluster=self.cluster,
                              collector=self.COLLECTOR)

    def update_metadata(self):
        self.mc.add_cluster()
import os.path

from cbagent.collectors import Collector


class SecondaryLatencyStats(Collector):

    COLLECTOR = "secondaryscan_latency"

    def _get_secondaryscan_latency(self):
        stats = {}
        if os.path.isfile(self.secondary_statsfile):
            with open(self.secondary_statsfile, 'rb') as fh:
                next(fh).decode()
                fh.seek(-400, 2)
                last = fh.readlines()[-1].decode()
                duration = last.split(',')[-1]
                stats = {}
                latency = duration.split(':')[1]
                latency = latency.rstrip()
                latency_key = duration.split(':')[0]
                latency_key = latency_key.strip()
                stats[latency_key] = latency
        return stats

    def sample(self):
        stats = self._get_secondaryscan_latency()
        if stats:
            self.update_metric_metadata(stats.keys())
            self.store.append(stats,
                              cluster=self.cluster,
                              collector=self.COLLECTOR)

    def update_metadata(self):
        self.mc.add_cluster()
apache-2.0
Python
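The substance of the record above is a one-call change: the parsed latency value is coerced to int so cbmonitor receives a number rather than a string. The same parsing, lifted into a standalone function for clarity; this is an illustrative rewrite with an invented sample line, not code from the repository.

def parse_latency_field(line):
    # Take the trailing comma-separated field, split "key: value",
    # and coerce the value to int as the fixed collector does.
    duration = line.split(',')[-1]
    key, _, value = duration.partition(':')
    return {key.strip(): int(value.strip())}

assert parse_latency_field("ts,scan_latency: 1200") == {"scan_latency": 1200}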
69c9322827ed95ce845b49119bc58aa4f36d82bb
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/ecf8607212b519546828e3fcc66f68985597a622.
gautam1858/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,yongtang/tensorflow,yongtang/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "ecf8607212b519546828e3fcc66f68985597a622" TFRT_SHA256 = "545c097a241ff80701e54d1e088762f27a7494980f01c08fee3ce3aeb4fd22cf" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "078534d79809852ea069d23bbacd2483ade18c11" TFRT_SHA256 = "55905ff389c5294ac1ce4be5e3f0af2d171e6061aa886fb66d59e3636f03412b" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
apache-2.0
Python
66284e57accec5977d606fc91a0b28177b352eb4
Add end-to-end integration testing for all compression types
dpkp/kafka-python,ohmu/kafka-python,Yelp/kafka-python,ohmu/kafka-python,zackdever/kafka-python,Aloomaio/kafka-python,DataDog/kafka-python,Aloomaio/kafka-python,wikimedia/operations-debs-python-kafka,mumrah/kafka-python,scrapinghub/kafka-python,mumrah/kafka-python,dpkp/kafka-python,Yelp/kafka-python,scrapinghub/kafka-python,wikimedia/operations-debs-python-kafka,zackdever/kafka-python
test/test_producer.py
test/test_producer.py
import pytest

from kafka import KafkaConsumer, KafkaProducer

from test.conftest import version
from test.testutil import random_string


@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
@pytest.mark.parametrize("compression", [None, 'gzip', 'snappy', 'lz4'])
def test_end_to_end(kafka_broker, compression):
    # LZ4 requires 0.8.2
    if compression == 'lz4' and version() < (0, 8, 2):
        return

    connect_str = 'localhost:' + str(kafka_broker.port)
    producer = KafkaProducer(bootstrap_servers=connect_str,
                             max_block_ms=10000,
                             compression_type=compression,
                             value_serializer=str.encode)
    consumer = KafkaConsumer(bootstrap_servers=connect_str,
                             group_id=None,
                             consumer_timeout_ms=10000,
                             auto_offset_reset='earliest',
                             value_deserializer=bytes.decode)

    topic = random_string(5)

    for i in range(1000):
        producer.send(topic, 'msg %d' % i)
    producer.flush()
    producer.close()

    consumer.subscribe([topic])
    msgs = set()
    for i in range(1000):
        try:
            msgs.add(next(consumer).value)
        except StopIteration:
            break

    assert msgs == set(['msg %d' % i for i in range(1000)])
import pytest

from kafka import KafkaConsumer, KafkaProducer

from test.conftest import version
from test.testutil import random_string


@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_end_to_end(kafka_broker):
    connect_str = 'localhost:' + str(kafka_broker.port)
    producer = KafkaProducer(bootstrap_servers=connect_str,
                             max_block_ms=10000,
                             value_serializer=str.encode)
    consumer = KafkaConsumer(bootstrap_servers=connect_str,
                             group_id=None,
                             consumer_timeout_ms=10000,
                             auto_offset_reset='earliest',
                             value_deserializer=bytes.decode)

    topic = random_string(5)

    for i in range(1000):
        producer.send(topic, 'msg %d' % i)
    producer.flush()
    producer.close()

    consumer.subscribe([topic])
    msgs = set()
    for i in range(1000):
        try:
            msgs.add(next(consumer).value)
        except StopIteration:
            break

    assert msgs == set(['msg %d' % i for i in range(1000)])
apache-2.0
Python
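The parametrized test above gates LZ4 with an early return inside the test body, which pytest reports as a pass rather than a skip. The same gate can be attached to the parameter itself; a sketch of that alternative, assuming the version() helper from test.conftest returns either a falsy value or a version tuple.

import pytest

from test.conftest import version

@pytest.mark.parametrize("compression", [
    None,
    'gzip',
    'snappy',
    # Skip this combination declaratively instead of returning early.
    pytest.param('lz4', marks=pytest.mark.skipif(
        not version() or version() < (0, 8, 2),
        reason="LZ4 requires 0.8.2")),
])
def test_end_to_end(kafka_broker, compression):
    ...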
7c12b82cb410540dfa3b65150ce39924b5793bce
handle package.json exceptions
piton-package-manager/piton
python_package_manager/utils/package_json.py
python_package_manager/utils/package_json.py
import os
import json


def get_dependencies():
    package_file_path = os.path.join(os.getcwd(), 'package.json')
    try:
        with open(package_file_path, 'r') as infile:
            package_dict = json.load(infile)
            dependencies = package_dict.get("pythonDependencies", [])
            dependencies_dev = package_dict.get("pythonDevDependencies", [])
    except:
        print("unable to read package.json")
        return []
    return dependencies


def write_dependencies(dependencies):
    package_file_path = os.path.join(os.getcwd(), 'package.json')
    try:
        with open(package_file_path, 'r') as infile:
            package_dict = json.load(infile)
            package_dict["pythonDependencies"] = dependencies
    except:
        print("unable to read package.json")
        return
    try:
        with open(package_file_path, 'w') as outfile:
            json.dump(package_dict, outfile, indent=2)
    except:
        print("unable to write package.json")
        return
import os
import json


def get_dependencies():
    package_file_path = os.path.join(os.getcwd(), 'package.json')
    with open(package_file_path, 'r') as infile:
        package_dict = json.load(infile)
        dependencies = package_dict.get("pythonDependencies", [])
        dependencies_dev = package_dict.get("pythonDevDependencies", [])
    return dependencies


def write_dependencies(dependencies):
    package_file_path = os.path.join(os.getcwd(), 'package.json')
    with open(package_file_path, 'r') as infile:
        package_dict = json.load(infile)
        package_dict["pythonDependencies"] = dependencies
    with open(package_file_path, 'w') as outfile:
        json.dump(package_dict, outfile, indent=2)
mit
Python
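The bare except: clauses in the fixed module above swallow everything, including KeyboardInterrupt and SystemExit. A narrower variant of the same error handling, catching only the failures the code actually anticipates; this is an editorial sketch with an invented function name, not the repository's code.

import json

def load_package_dict(path):
    # Catch only the expected failures: unreadable file or malformed JSON.
    try:
        with open(path, 'r') as infile:
            return json.load(infile)
    except (IOError, OSError):
        print("unable to read " + path)
    except ValueError:  # json.JSONDecodeError subclasses ValueError on Python 3
        print(path + " is not valid JSON")
    return None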
b0dd18d4e4e18dafae9d93848f633afc396c91b4
remove outdated/misguided meta __variables__, https://mail.python.org/pipermail/python-dev/2001-March/013328.html
fastly/fastly-py,fastly/fastly-py
fastly/__init__.py
fastly/__init__.py
from fastly import *
""" """ from fastly import * __author__ = 'Tyler McMullen <[email protected]>' __copyright__ = 'Copyright (c) 2012 Fastly Inc' __license__ = 'BSD' __version__ = '0.0.1' __url__ = 'http://www.fastly.com/docs/fastly-py'
mit
Python
bc02e845f4a8b726f7474efa77753c7de6fe600b
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/300e7ac61cda0eb2ddb13b7f2ad850d80646adcd.
gautam1858/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,karllessard/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,karllessard/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "300e7ac61cda0eb2ddb13b7f2ad850d80646adcd" TFRT_SHA256 = "2b79ada8dbacd5de1b868121822ffde58564a1f8749c4f3d91f8f951e76c3fbc" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], )
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "ed6f666ac14b939d7303607c950b88b7d5607c46" TFRT_SHA256 = "b99fed746abe39cb0b072e773af53a4c7189056737fc0118ef3b013c187660c9" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT), ], )
apache-2.0
Python
42a147b0dcc24ea51207cca020d2bfc6fa7bde46
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/926650aa8e303d62814e45f709d16673501d96bc.
tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow
third_party/tf_runtime/workspace.bzl
third_party/tf_runtime/workspace.bzl
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "926650aa8e303d62814e45f709d16673501d96bc" TFRT_SHA256 = "f178d137127c3a67962362f596b8015fdcdc58271e1e3d692eba47b09d31402a" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
"""Provides the repository macro to import TFRT.""" load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls") def repo(): """Imports TFRT.""" # Attention: tools parse and update these lines. TFRT_COMMIT = "d50aae4b79fb4aa5a3c4dd280004313c7f1fda51" TFRT_SHA256 = "3d02021cbd499d749eeb4e3e6bdcd47a67695bfc145827c5821548c3c6f1494c" tf_http_archive( name = "tf_runtime", sha256 = TFRT_SHA256, strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT), urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)), # A patch file can be provided for atomic commits to both TF and TFRT. # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'. patch_file = None, )
apache-2.0
Python
4cbbe7c3ab891a11492f368d780a1416d37358ff
Change the method of generating content of GUID element
feedzilla/feedzilla,feedzilla/feedzilla,feedzilla/feedzilla
feedzilla/syndication.py
feedzilla/syndication.py
# -*- coding: utf-8 -*-
# Copyright: 2011, Grigoriy Petukhov
# Author: Grigoriy Petukhov (http://lorien.name)
# License: BSD
from django.contrib.syndication.views import Feed
from django.conf import settings

from feedzilla.models import Post


class PostFeed(Feed):
    title_template = 'feedzilla/feed/post_title.html'
    description_template = 'feedzilla/feed/post_description.html'
    title = settings.FEEDZILLA_SITE_TITLE
    description = settings.FEEDZILLA_SITE_DESCRIPTION
    link = '/'

    def items(self, obj):
        return Post.active_objects.all()\
                   .order_by('-created')[:settings.FEEDZILLA_PAGE_SIZE]

    #def item_title(self, item):
        #return item.name

    #def item_description(self, item):
        #return item.description

    def item_pubdate(self, item):
        return item.created

    def item_guid(self, item):
        return item.link
# -*- coding: utf-8 -*-
# Copyright: 2011, Grigoriy Petukhov
# Author: Grigoriy Petukhov (http://lorien.name)
# License: BSD
from django.contrib.syndication.views import Feed
from django.conf import settings

from feedzilla.models import Post


class PostFeed(Feed):
    title_template = 'feedzilla/feed/post_title.html'
    description_template = 'feedzilla/feed/post_description.html'
    title = settings.FEEDZILLA_SITE_TITLE
    description = settings.FEEDZILLA_SITE_DESCRIPTION
    link = '/'

    def items(self, obj):
        return Post.active_objects.all()\
                   .order_by('-created')[:settings.FEEDZILLA_PAGE_SIZE]

    #def item_title(self, item):
        #return item.name

    #def item_description(self, item):
        #return item.description

    def item_pubdate(self, item):
        return item.created

    def item_guid(self, item):
        return str(item.guid)
bsd-3-clause
Python
4e942272c00c943eb2402a94b86f2c5a0c778ac0
update post_sync tests
adorton-adobe/user-sync.py,adorton-adobe/user-sync.py,adobe-apiplatform/user-sync.py,adobe-apiplatform/user-sync.py
tests/test_post_sync.py
tests/test_post_sync.py
import pytest

from user_sync.post_sync.manager import PostSyncData


@pytest.fixture
def example_user():
    return {
        'type': 'federatedID',
        'username': '[email protected]',
        'domain': 'example.com',
        'email': '[email protected]',
        'firstname': 'Example',
        'lastname': 'User',
        'groups': set(),
        'country': 'US',
    }


def test_add_umapi_user(example_user):
    email_id = '[email protected]'
    post_sync_data = PostSyncData()
    post_sync_data.update_umapi_data(None, email_id, [], [], **example_user)
    assert post_sync_data.umapi_data[None][email_id] == example_user


def test_add_groups(example_user):
    post_sync_data = PostSyncData()
    email_id = '[email protected]'
    example_user['groups'] = {'group1', 'group2', 'group3'}
    groups_add = ['group3', 'group4', 'group5']
    post_sync_data.update_umapi_data(None, email_id, groups_add, [], **example_user)
    assert post_sync_data.umapi_data[None][email_id]['groups'] == example_user['groups'] | set(groups_add)


def test_remove_groups(example_user):
    post_sync_data = PostSyncData()
    email_id = '[email protected]'
    example_user['groups'] = {'group1', 'group2', 'group3'}
    groups_remove = ['group1', 'group2']
    post_sync_data.update_umapi_data(None, email_id, [], groups_remove, **example_user)
    assert post_sync_data.umapi_data[None][email_id]['groups'] == example_user['groups'] - set(groups_remove)


def test_add_remove_groups(example_user):
    post_sync_data = PostSyncData()
    email_id = '[email protected]'
    example_user['groups'] = {'group1', 'group2', 'group3', 'group4', 'group5'}
    groups_add = ['group6']
    groups_remove = ['group1', 'group2']
    post_sync_data.update_umapi_data(None, email_id, groups_add, groups_remove, **example_user)
    delta_groups = example_user['groups'] | set(groups_add)
    delta_groups -= set(groups_remove)
    assert post_sync_data.umapi_data[None][email_id]['groups'] == delta_groups
import pytest

from user_sync.post_sync import manager

PostSyncManager = manager.PostSyncManager


@pytest.fixture
def example_user():
    return {
        'identity_type': 'federatedID',
        'username': '[email protected]',
        'domain': 'example.com',
        'email': '[email protected]',
        'firstname': 'Example',
        'lastname': 'User',
        'groups': [],
        'country': 'US',
    }


def test_add_umapi_user(example_user, monkeypatch):
    with monkeypatch.context() as m:
        m.setattr(manager, '_SYNC_DATA_STORE', {})
        email_id = '[email protected]'
        PostSyncManager.update_sync_data(email_id, 'umapi_data', [], [], **example_user)
        assert manager._SYNC_DATA_STORE[email_id.lower()]['umapi_data'] == example_user


def test_add_groups(example_user, monkeypatch):
    with monkeypatch.context() as m:
        m.setattr(manager, '_SYNC_DATA_STORE', {})
        email_id = '[email protected]'
        example_user['groups'] = ['group1', 'group2', 'group3']
        groups_add = ['group3', 'group4', 'group5']
        PostSyncManager.update_sync_data(email_id, 'umapi_data', groups_add, [], **example_user)
        assert sorted(manager._SYNC_DATA_STORE[email_id.lower()]['umapi_data']['groups']) == sorted(list(set(
            example_user['groups']) | set(groups_add)))


def test_remove_groups(example_user, monkeypatch):
    with monkeypatch.context() as m:
        m.setattr(manager, '_SYNC_DATA_STORE', {})
        email_id = '[email protected]'
        example_user['groups'] = ['group1', 'group2', 'group3']
        groups_remove = ['group1', 'group2']
        PostSyncManager.update_sync_data(email_id, 'umapi_data', [], groups_remove, **example_user)
        assert sorted(manager._SYNC_DATA_STORE[email_id.lower()]['umapi_data']['groups']) == sorted(list(set(
            example_user['groups']) - set(groups_remove)))


def test_add_remove_groups(example_user, monkeypatch):
    with monkeypatch.context() as m:
        m.setattr(manager, '_SYNC_DATA_STORE', {})
        email_id = '[email protected]'
        example_user['groups'] = ['group1', 'group2', 'group3', 'group4', 'group5']
        groups_add = ['group6']
        groups_remove = ['group1', 'group2']
        PostSyncManager.update_sync_data(email_id, 'umapi_data', groups_add, groups_remove, **example_user)
        delta_groups = list(set(example_user['groups']) | set(groups_add))
        delta_groups = list(set(delta_groups) - set(groups_remove))
        assert sorted(manager._SYNC_DATA_STORE[email_id.lower()]['umapi_data']['groups']) == sorted(delta_groups)
mit
Python
20dc4b6d80842579740ed91ebb848446a0cecdbf
fix test_settings
eloquence/unisubs,wevoice/wesub,ujdhesa/unisubs,norayr/unisubs,ReachingOut/unisubs,ReachingOut/unisubs,pculture/unisubs,pculture/unisubs,norayr/unisubs,norayr/unisubs,norayr/unisubs,ofer43211/unisubs,ujdhesa/unisubs,eloquence/unisubs,wevoice/wesub,ReachingOut/unisubs,ofer43211/unisubs,ofer43211/unisubs,eloquence/unisubs,eloquence/unisubs,ujdhesa/unisubs,ujdhesa/unisubs,ofer43211/unisubs,wevoice/wesub,pculture/unisubs,wevoice/wesub,pculture/unisubs,ReachingOut/unisubs
test_settings.py
test_settings.py
from settings import *
__import__('dev-settings', globals(), locals(), ['*'], -1)

ROOT_URLCONF = 'urls'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': rel('mirosubs.sqlite3'),
    }
}

INSTALLED_APPS += ('django_nose', )
INSTALLED_APPS = list(INSTALLED_APPS)
INSTALLED_APPS.remove('mirosubs')
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
from settings import *

ROOT_URLCONF = 'urls'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': rel('mirosubs.sqlite3'),
    }
}

INSTALLED_APPS += ('django_nose', )
INSTALLED_APPS = list(INSTALLED_APPS)
INSTALLED_APPS.remove('mirosubs')
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
agpl-3.0
Python
555981d288b1e3970e2cb9432db3e72f57ba48b4
deal with zero args corner case and return correct type
FInAT/FInAT
finat/pyop2_interface.py
finat/pyop2_interface.py
try:
    from pyop2.pyparloop import Kernel
except:
    Kernel = None

from .interpreter import evaluate


def pyop2_kernel(kernel, kernel_args, interpreter=False):
    """Return a :class:`pyop2.Kernel` from the recipe and kernel data provided.

    :param kernel: The :class:`~.utils.Kernel` to map to PyOP2.
    :param kernel_args: The ordered list of Pymbolic variables constituting
        the kernel arguments, excluding the result of the recipe (the
        latter should be prepended to the argument list).
    :param interpreter: If set to ``True``, the kernel will be evaluated
        using the FInAT interpreter instead of generating a compiled kernel.
    :result: The :class:`pyop2.Kernel`
    """
    if Kernel is None:
        raise ImportError("pyop2 was not imported. Is it installed?")

    if kernel_args and \
            set(kernel_args) != kernel.kernel_data.kernel_args:
        raise ValueError("Incomplete value list")

    if interpreter:
        def kernel_function(*args):
            context = {kernel_args: args[1:]}
            args[0][:] = evaluate(kernel.recipe, context, kernel.kernel_data)
        return Kernel(kernel_function)
    else:
        raise NotImplementedError
try:
    from pyop2.pyparloop import Kernel
except:
    Kernel = None

from .interpreter import evaluate


def pyop2_kernel(kernel, kernel_args, interpreter=False):
    """Return a :class:`pyop2.Kernel` from the recipe and kernel data provided.

    :param kernel: The :class:`~.utils.Kernel` to map to PyOP2.
    :param kernel_args: The ordered list of Pymbolic variables constituting
        the kernel arguments, excluding the result of the recipe (the
        latter should be prepended to the argument list).
    :param interpreter: If set to ``True``, the kernel will be evaluated
        using the FInAT interpreter instead of generating a compiled kernel.
    :result: The :class:`pyop2.Kernel`
    """
    if Kernel is None:
        raise ImportError("pyop2 was not imported. Is it installed?")

    if set(kernel_args) != kernel.kernel_data.kernel_args:
        raise ValueError("Incomplete value list")

    if interpreter:
        def kernel_function(*args):
            context = {kernel_args: args[1:]}
            args[0][:] = evaluate(kernel.recipe, context, kernel.kernel_data)
        return (Kernel(kernel_function), kernel_args)
    else:
        raise NotImplementedError
mit
Python
12a85a17194610f81c9ff0c73ea69f4adfc2b307
remove old routine
fireeye/flare-floss,fireeye/flare-floss
floss/render/sanitize.py
floss/render/sanitize.py
import string


def sanitize_string_for_printing(s: str) -> str:
    """
    Return sanitized string for printing to cli.
    """
    sanitized_string = s.replace("\\\\", "\\")  # print single backslashes
    sanitized_string = "".join(c for c in sanitized_string if c in string.printable)
    return sanitized_string
import string


def sanitize_string_for_printing(s: str) -> str:
    """
    Return sanitized string for printing to cli.
    """
    sanitized_string = s.replace("\\\\", "\\")  # print single backslashes
    sanitized_string = "".join(c for c in sanitized_string if c in string.printable)
    return sanitized_string


def sanitize_string_for_script(s: str) -> str:
    """
    Return sanitized string that is added to IDA script source.
    """
    sanitized_string = sanitize_string_for_printing(s)
    sanitized_string = sanitized_string.replace("\\", "\\\\")
    sanitized_string = sanitized_string.replace('"', '\\"')
    return sanitized_string
apache-2.0
Python
8379d56ac1be68c9c1d255893644813df8300ed8
add verbose name
manducku/awesomepose,manducku/awesomepose,manducku/awesomepose,manducku/awesomepose
awesomepose/categories/models/category.py
awesomepose/categories/models/category.py
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey


class Category(MPTTModel):
    name = models.CharField(max_length=50, unique=True)
    parent = TreeForeignKey('self', null=True, blank=True, related_name="children", db_index=True)

    class MPTTMeta:
        order_insertion_by = ['name']

    class Meta:
        verbose_name = "카테고리"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey


class Category(models.Model):
    name = models.CharField(max_length=50, unique=True)
    parent = TreeForeignKey('self', null=True, blank=True, related_name="children", db_index=True)

    class MPTTMeta:
        order_insertion_by = ['name']

    def __str__(self):
        return self.name
mit
Python
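The point of the fix above is that MPTTMeta only takes effect once the model actually inherits from MPTTModel, which also unlocks django-mptt's tree queries. A brief usage sketch, assuming the Category model above is migrated; the category names are invented.

# Illustrative only: exercise the tree API that MPTTModel provides.
root = Category.objects.create(name="Outdoor")
child = Category.objects.create(name="Climbing", parent=root)

child.get_ancestors()    # <QuerySet [<Category: Outdoor>]>
root.get_descendants()   # <QuerySet [<Category: Climbing>]>
root.is_root_node()      # True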
bcd8d27194131e48d73d843bdae9930e6720130f
Update Vartype
oneklc/dimod,oneklc/dimod
dimod/vartypes.py
dimod/vartypes.py
""" Enumeration of valid variable types for binary quadratic models. Examples: This example shows easy access to different Vartypes, which are in the main namespace. >>> vartype = dimod.SPIN >>> print(vartype) Vartype.SPIN >>> vartype = dimod.BINARY >>> print(vartype) Vartype.BINARY >>> vartype = dimod.Vartype.SPIN >>> print(vartype) Vartype.SPIN >>> isinstance(vartype, dimod.Vartype) True This example shows access by value or name. >>> print(dimod.Vartype({0, 1})) Vartype.BINARY >>> print(dimod.Vartype['SPIN']) Vartype.SPIN This example uses the `.value` parameter to validate. >>> sample = {'u': -1, 'v': 1} >>> vartype = dimod.Vartype.SPIN >>> all(val in vartype.value for val in sample.values()) True """ import enum __all__ = ['Vartype', 'SPIN', 'BINARY'] class Vartype(enum.Enum): """An :py:class:`~enum.Enum` over the types of variables for the binary quadratic model. Attributes: SPIN (:class:`.Vartype`): Vartype for spin-valued models; variables of the model are either -1 or 1. BINARY (:class:`.Vartype`): Vartype for binary models; variables of the model are either 0 or 1. """ SPIN = frozenset({-1, 1}) BINARY = frozenset({0, 1}) SPIN = Vartype.SPIN BINARY = Vartype.BINARY
""" Vartype is an enumeration of the valid types for variables in a binary quadratic models. Examples: >>> vartype = dimod.Vartype.SPIN >>> print(vartype) Vartype.SPIN >>> isinstance(vartype, dimod.Vartype) True Access can also be by value or name. >>> print(dimod.Vartype({0, 1})) Vartype.BINARY >>> print(dimod.Vartype['SPIN']) Vartype.SPIN To check correctness, use the `.value` parameter. >>> sample = {'u': -1, 'v': 1} >>> vartype = dimod.Vartype.SPIN >>> all(val in vartype.value for val in sample.values()) True The different Vartypes are also in the main namespace for easy access. >>> vartype = dimod.SPIN >>> print(vartype) Vartype.SPIN >>> vartype = dimod.BINARY >>> print(vartype) Vartype.BINARY """ import enum __all__ = ['Vartype', 'SPIN', 'BINARY'] class Vartype(enum.Enum): """An :py:class:`~enum.Enum` over the types of variables for the binary quadratic model. Attributes: SPIN (:class:`.Vartype`): The vartype for spin-valued models. That is the variables of the model are either -1 or 1. BINARY (:class:`.Vartype`): The vartype for binary models. That is the variables of the model are either 0 or 1. """ SPIN = frozenset({-1, 1}) BINARY = frozenset({0, 1}) SPIN = Vartype.SPIN BINARY = Vartype.BINARY
apache-2.0
Python
0cb9b65fc0030922fea122a82451fef0d6d3653b
update version 1.0.0
bird-house/flyingpigeon
flyingpigeon/__init__.py
flyingpigeon/__init__.py
from .wsgi import application
from .demo import main

__version__ = "1.0.0"
from .wsgi import application
from .demo import main

__version__ = "0.11.0"
apache-2.0
Python
cfc6083c58d151934403ccf55444b122fec46604
Resolve here
takeyourmeds/takeyourmeds-web,takeyourmeds/takeyourmeds-web,takeyourmeds/takeyourmeds-web,takeyourmeds/takeyourmeds-web
takeyourmeds/utils/test.py
takeyourmeds/utils/test.py
from django.test import TestCase
from django.shortcuts import resolve_url
from django.contrib.auth import get_user_model

User = get_user_model()


class TestCase(TestCase):
    def setUp(self):
        self.user = self.create_user('testuser')

    def assertStatusCode(self, status_code, fn, urlconf, *args, **kwargs):
        if kwargs.pop('login', False):
            user = kwargs.pop('user', self.user)
            self.client.login(email=user.email, password='password')

        response = fn(resolve_url(urlconf, *args, **kwargs))

        self.assertEqual(
            response.status_code,
            status_code,
            "Got HTTP %d but expected HTTP %d. Response:\n%s" % (
                response.status_code,
                status_code,
                response,
            )
        )

        return response

    def assertGET(self, status_code, urlconf, *args, **kwargs):
        return self.assertStatusCode(
            status_code,
            self.client.get,
            urlconf,
            *args,
            **kwargs
        )

    def assertPOST(self, status_code, data, *args, **kwargs):
        return self.assertStatusCode(
            status_code,
            lambda x: self.client.post(x, data),
            *args,
            **kwargs
        )

    def assertRedirectsTo(self, response, urlconf, *args, **kwargs):
        status_code = kwargs.pop('status_code', 302)
        target_status_code = kwargs.pop('target_status_code', 200)

        return self.assertRedirects(
            response,
            resolve_url(urlconf, *args, **kwargs),
            status_code,
            target_status_code,
        )

    def create_user(self, email):
        return User.objects.create_user(email, 'password')


class SuperuserTestCase(TestCase):
    def setUp(self):
        super(SuperuserTestCase, self).setUp()

        self.user.is_staff = True
        self.user.is_superuser = True
        self.user.save()
from django.test import TestCase
from django.shortcuts import resolve_url
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse

User = get_user_model()


class TestCase(TestCase):
    def setUp(self):
        self.user = self.create_user('testuser')

    def assertStatusCode(self, status_code, fn, urlconf, *args, **kwargs):
        if kwargs.pop('login', False):
            user = kwargs.pop('user', self.user)
            self.client.login(email=user.email, password='password')

        response = fn(resolve_url(urlconf, *args, **kwargs))

        self.assertEqual(
            response.status_code,
            status_code,
            "Got HTTP %d but expected HTTP %d. Response:\n%s" % (
                response.status_code,
                status_code,
                response,
            )
        )

        return response

    def assertGET(self, status_code, urlconf, *args, **kwargs):
        return self.assertStatusCode(
            status_code,
            self.client.get,
            urlconf,
            *args,
            **kwargs
        )

    def assertPOST(self, status_code, data, *args, **kwargs):
        return self.assertStatusCode(
            status_code,
            lambda x: self.client.post(x, data),
            *args,
            **kwargs
        )

    def assertRedirectsTo(self, response, urlconf, *args, **kwargs):
        status_code = kwargs.pop('status_code', 302)
        target_status_code = kwargs.pop('target_status_code', 200)

        return self.assertRedirects(
            response,
            reverse(urlconf, args=args, kwargs=kwargs),
            status_code,
            target_status_code,
        )

    def create_user(self, email):
        return User.objects.create_user(email, 'password')


class SuperuserTestCase(TestCase):
    def setUp(self):
        super(SuperuserTestCase, self).setUp()

        self.user.is_staff = True
        self.user.is_superuser = True
        self.user.save()
mit
Python
d9ca3d7113423a84026ad59e1369321baa54d532
Add a simple neutron_handler
fs714/drcontroller
drcontroller/replication/controller/neutron_handler.py
drcontroller/replication/controller/neutron_handler.py
import logging

import base_handler


class NeutronHandler(base_handler.BaseHandler):

    def __init__(self, set_conf, handle_type):
        '''
        set_conf: the configuration file path of keystone authorization
        handle_type: the handle service type, eg, glance, nova, neutron
        '''
        self.logger = logging.getLogger("NeutronHandler")
        self.logger.info('Init NeutronHandler')
        super(NeutronHandler, self).__init__(set_conf, handle_type)
import logging


def post_handle(message):
    pass


def delete_handle(message):
    pass


def put_handle(mesage):
    pass


class NeutronHandler(object):

    def __init__(self):
        self.logger = logging.getLogger("NeutronHandler")
        self.logger.info('Init NeutronHandler')

    def accept(self, *req, **kwargs):
        self.logger = logging.getLogger("NeutronHandler:accept")
        self.logger.info("--- Hello Neutron ---")
        return ['Hello Neutron']
apache-2.0
Python
ca8622f5af66ef01c9c185065f2e77fca30bef79
Remove unused update method
kylef/irctk
irctk/nick.py
irctk/nick.py
import re


class Nick(object):
    IRC_USERHOST_REGEX = re.compile(r'^(.*)!(.*)@(.*)$')

    @classmethod
    def parse(cls, client, userhost):
        m = cls.IRC_USERHOST_REGEX.match(userhost)
        if m:
            return cls(client, m.group(1), m.group(2), m.group(3))
        return cls(client, host=userhost)

    def __init__(self, client, nick='', ident='', host=''):
        self.client = client
        self.nick = nick
        self.ident = ident
        self.host = host

    def __str__(self):
        return self.nick

    def __repr__(self):
        return '<Nick %s!%s@%s>' % (self.nick, self.ident, self.host)

    def __eq__(self, other):
        return self.client.irc_equal(str(other), self.nick)

    @property
    def channels(self):
        """
        Returns all the Channels that both the nick and the client has joined.
        """
        return [channel for channel in self.client.channels if channel.has_nick(self)]
import re


class Nick(object):
    IRC_USERHOST_REGEX = re.compile(r'^(.*)!(.*)@(.*)$')

    @classmethod
    def parse(cls, client, userhost):
        m = cls.IRC_USERHOST_REGEX.match(userhost)
        if m:
            return cls(client, m.group(1), m.group(2), m.group(3))
        return cls(client, host=userhost)

    def __init__(self, client, nick='', ident='', host=''):
        self.client = client
        self.nick = nick
        self.ident = ident
        self.host = host

    def __str__(self):
        return self.nick

    def __repr__(self):
        return '<Nick %s!%s@%s>' % (self.nick, self.ident, self.host)

    def __eq__(self, other):
        return self.client.irc_equal(str(other), self.nick)

    @property
    def channels(self):
        """
        Returns all the Channels that both the nick and the client has joined.
        """
        return [channel for channel in self.client.channels if channel.has_nick(self)]

    def update(self):
        if self == self.client.nick:
            self.client.nick.ident = self.ident
            self.client.nick.host = self.host

        for channel in self.client.channels:
            n = channel.find_nick(self)
            if n:
                n.ident = self.ident
                n.host = self.host
bsd-3-clause
Python
52c17672d73a9461771c3ec09465d91992160fc5
Fix quota init migration
opennode/nodeconductor-saltstack
src/nodeconductor_saltstack/exchange/migrations/0004_init_quotas.py
src/nodeconductor_saltstack/exchange/migrations/0004_init_quotas.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from uuid import uuid4

from django.contrib.contenttypes.models import ContentType
from django.db import models, migrations


GLOBAL_MAILBOX_SIZE_QUOTA = 'global_mailbox_size'
USER_COUNT_QUOTA = 'user_count'


def convert_mailbox_size_to_mb(apps, schema_editor):
    Tenant = apps.get_model('exchange', 'ExchangeTenant')
    for tenant in Tenant.objects.all():
        tenant.mailbox_size *= 1024
        tenant.save()


def init_quotas(apps, schema_editor):
    Quota = apps.get_model('quotas', 'Quota')
    Tenant = apps.get_model('exchange', 'ExchangeTenant')
    tenant_ct = ContentType.objects.get_for_model(Tenant)

    for tenant in Tenant.objects.all():
        if not Quota.objects.filter(content_type_id=tenant_ct.id, object_id=tenant.id,
                                    name=GLOBAL_MAILBOX_SIZE_QUOTA):
            Quota.objects.create(
                uuid=uuid4(),
                name=GLOBAL_MAILBOX_SIZE_QUOTA,
                limit=tenant.max_users*tenant.mailbox_size,
                usage=0,
                content_type_id=tenant_ct.id,
                object_id=tenant.id)

        if not Quota.objects.filter(content_type_id=tenant_ct.id, object_id=tenant.id,
                                    name=USER_COUNT_QUOTA):
            Quota.objects.create(
                uuid=uuid4(),
                name=USER_COUNT_QUOTA,
                limit=tenant.max_users,
                usage=0,
                content_type_id=tenant_ct.id,
                object_id=tenant.id)


class Migration(migrations.Migration):

    dependencies = [
        ('exchange', '0003_rename_tenant_model'),
    ]

    operations = [
        migrations.AlterField(
            model_name='exchangetenant',
            name='mailbox_size',
            field=models.PositiveSmallIntegerField(help_text=b'Maximum size of single mailbox, MB'),
            preserve_default=True,
        ),
        migrations.RunPython(convert_mailbox_size_to_mb),
        migrations.RunPython(init_quotas),
    ]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from uuid import uuid4

from django.contrib.contenttypes.models import ContentType
from django.db import models, migrations


GLOBAL_MAILBOX_SIZE_QUOTA = 'global_mailbox_size'
USER_COUNT_QUOTA = 'user_count'


def convert_mailbox_size_to_mb(apps, schema_editor):
    Tenant = apps.get_model('exchange', 'Tenant')
    for tenant in Tenant.objects.all():
        tenant.mailbox_size *= 1024
        tenant.save()


def init_quotas(apps, schema_editor):
    Quota = apps.get_model('quotas', 'Quota')
    Tenant = apps.get_model('exchange', 'Tenant')
    tenant_ct = ContentType.objects.get_for_model(Tenant)

    for tenant in Tenant.objects.all():
        if not Quota.objects.filter(content_type_id=tenant_ct.id, object_id=tenant.id,
                                    name=GLOBAL_MAILBOX_SIZE_QUOTA):
            Quota.objects.create(
                uuid=uuid4(),
                name=GLOBAL_MAILBOX_SIZE_QUOTA,
                limit=tenant.max_users*tenant.mailbox_size,
                usage=0,
                content_type_id=tenant_ct.id,
                object_id=tenant.id)

        if not Quota.objects.filter(content_type_id=tenant_ct.id, object_id=tenant.id,
                                    name=USER_COUNT_QUOTA):
            Quota.objects.create(
                uuid=uuid4(),
                name=USER_COUNT_QUOTA,
                limit=tenant.max_users,
                usage=0,
                content_type_id=tenant_ct.id,
                object_id=tenant.id)


class Migration(migrations.Migration):

    dependencies = [
        ('exchange', '0003_rename_tenant_model'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tenant',
            name='mailbox_size',
            field=models.PositiveSmallIntegerField(help_text=b'Maximum size of single mailbox, MB'),
            preserve_default=True,
        ),
        migrations.RunPython(convert_mailbox_size_to_mb),
        migrations.RunPython(init_quotas),
    ]
mit
Python