commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
8d459d86d33992129726ef177ed24fe8a00e9b75 | Bump version to 4.0.0rc1 | platformio/platformio,platformio/platformio-core,platformio/platformio-core | platformio/__init__.py | platformio/__init__.py | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0rc1")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0b3")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| apache-2.0 | Python |
5f917746e86c733d37c56e15a97f7aecb73fa75f | fix bug comparing string with int. int(games) | ChelseaStats/mssngVwlsRnd | python/guess_the_player.py | python/guess_the_player.py | # importing modules
import os
import csv
import time
import random
import tweepy
import player
# secrets
consumer_key = os.getenv('c_key')
consumer_secret = os.getenv('c_secret')
access_token = os.getenv('a_token')
access_token_secret = os.getenv('a_secret')
# authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# processing
with open('player_history.csv') as csvfile:
row = random.choice([a for a in list(csv.DictReader(csvfile)) if int(a['Games']) > 1])
po = player.player(
row['Player Name'],
row['Goals'],
row['Games'],
row['Starter'],
row['Sub'],
row['Active'],
row['Debut']
)
api.update_status(status=po.get_guess_player_string())
time.sleep(10*60)
api.update_status(status=f"#GuessThePlayer Well done if you got it, the answer was: {po.name} #CFC #Chelsea")
| # importing modules
import os
import csv
import time
import random
import tweepy
import player
# secrets
consumer_key = os.getenv('c_key')
consumer_secret = os.getenv('c_secret')
access_token = os.getenv('a_token')
access_token_secret = os.getenv('a_secret')
# authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# processing
with open('player_history.csv') as csvfile:
row = random.choice([a for a in list(csv.DictReader(csvfile)) if a['Games'] > 1])
po = player.player(
row['Player Name'],
row['Goals'],
row['Games'],
row['Starter'],
row['Sub'],
row['Active'],
row['Debut']
)
api.update_status(status=po.get_guess_player_string())
time.sleep(10*60)
api.update_status(status=f"#GuessThePlayer Well done if you got it, the answer was: {po.name} #CFC #Chelsea")
| unlicense | Python |
a172a17c815e8fcbe0f8473c6bac1ea1d9714817 | Bump version to 4.4.0a4 | platformio/platformio,platformio/platformio-core,platformio/platformio-core | platformio/__init__.py | platformio/__init__.py | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 4, "0a4")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A new generation collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"Professional development environment for Embedded, IoT, Arduino, CMSIS, ESP-IDF, "
"FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, STM32Cube, Zephyr RTOS, ARM, AVR, "
"Espressif (ESP8266/ESP32), FPGA, MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), "
"NXP i.MX RT, PIC32, RISC-V, STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_api__ = "https://api.registry.platformio.org"
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
| # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 4, "0a3")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A new generation collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"Professional development environment for Embedded, IoT, Arduino, CMSIS, ESP-IDF, "
"FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, STM32Cube, Zephyr RTOS, ARM, AVR, "
"Espressif (ESP8266/ESP32), FPGA, MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), "
"NXP i.MX RT, PIC32, RISC-V, STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_api__ = "https://api.registry.platformio.org"
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
| apache-2.0 | Python |
54921c5dbdc68893fe45649d07d067818c36889b | Bump version to 4.0.0b3 | platformio/platformio-core,platformio/platformio,platformio/platformio-core | platformio/__init__.py | platformio/__init__.py | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0b3")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0b2")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| apache-2.0 | Python |
74ecf023ef13fdba6378d6b50b3eaeb06b9e0c97 | Rename env vars & modify query | avatao-content/challenge-toolbox,avatao-content/challenge-toolbox,avatao-content/challenge-toolbox,avatao-content/challenge-toolbox,avatao-content/challenge-toolbox,avatao-content/challenge-toolbox,avatao-content/challenge-toolbox,avatao-content/challenge-toolbox,avatao-content/challenge-toolbox | rebuild_dependant_repos.py | rebuild_dependant_repos.py | import os, sys, re
import requests
from github import Github
CIRCLECI_BASEURL = "https://circleci.com/api/v2"
CIRCLECI_ACCESS_TOKEN = os.environ["AVATAO_CIRCLECI_TOKEN"]
GITHUB_ACCESS_TOKEN = os.environ["AVATAO_GITHUB_TOKEN"]
g = Github(GITHUB_ACCESS_TOKEN)
if len(sys.argv) < 2:
raise AttributeError("The image name is required as the first argument.")
image_name = sys.argv[1]
image_name = re.sub(r"[^a-zA-Z0-9-]", " ", image_name)
query = "org:avatao-content language:Dockerfile " + image_name
print("Searching GitHub with query: '%s'" % query)
code_search = g.search_code(query)
circleci_project_slugs = set()
for result in code_search:
circleci_project_slugs.add(f"gh/{result.repository.organization.login}/{result.repository.name}")
print("Found %d candidate repositories." % len(circleci_project_slugs))
current_item = 1
for slug in circleci_project_slugs:
print("[%d/%d] Triggering CI pipeline for: %s" % (current_item, len(circleci_project_slugs), slug))
requests.post(f"{CIRCLECI_BASEURL}/project/{slug}/pipeline", headers={"Circle-Token": CIRCLECI_ACCESS_TOKEN})
current_item += 1
| import os, sys, re, logging
import requests
from github import Github
logging.basicConfig(level=logging.DEBUG)
CIRCLECI_BASEURL = "https://circleci.com/api/v2"
CIRCLECI_ACCESS_TOKEN = os.environ["TAO_CIRCLECI_TOKEN"]
GITHUB_ACCESS_TOKEN = os.environ["TAO_GITHUB_TOKEN"]
g = Github(GITHUB_ACCESS_TOKEN)
if len(sys.argv) < 2:
raise AttributeError("The image name is required as the first argument.")
image_name = sys.argv[1]
image_name = re.sub(r"[^a-zA-Z0-9-]", " ", image_name)
query = "org:avatao-content language:Dockerfile FROM " + image_name
logging.debug("Searching GitHub with query: '%s'", query)
code_search = g.search_code(query)
circleci_project_slugs = set()
for result in code_search:
circleci_project_slugs.add(f"gh/{result.repository.organization.login}/{result.repository.name}")
logging.debug("Found %d candidate repositories.", len(circleci_project_slugs))
current_item = 1
for slug in circleci_project_slugs:
logging.debug("[%d/%d] Triggering CI pipeline for: %s", current_item, len(circleci_project_slugs), slug)
requests.post(f"{CIRCLECI_BASEURL}/project/{slug}/pipeline", headers={"Circle-Token": CIRCLECI_ACCESS_TOKEN})
current_item += 1
| apache-2.0 | Python |
0ba2b371e08c40e7c4d56efee6f4a828f1e7aeb0 | Update functions.py | KouKariya/tic-tac-toe-py | ref/functions/functions.py | ref/functions/functions.py | #functions.py
#Written by Jesse Gallarzo
#Add code here
#def functionOne():
#def functionTwo():
#def functionThree():
def main():
#Add code here
print('Test')
main()
| #functions.py
#Written by Jesse Gallarzo
#Add code here
#def function One:
def main():
#Add code here
print('Test')
main()
| mit | Python |
7847d22f95f44792e35108af24267161411c5bf1 | Remove settings override with no effect | bittner/django-analytical,pjdelport/django-analytical,jcassee/django-analytical | analytical/tests/test_tag_gosquared.py | analytical/tests/test_tag_gosquared.py | """
Tests for the GoSquared template tags and filters.
"""
from django.contrib.auth.models import User, AnonymousUser
from django.http import HttpRequest
from django.template import Context
from django.test.utils import override_settings
from analytical.templatetags.gosquared import GoSquaredNode
from analytical.tests.utils import TagTestCase
from analytical.utils import AnalyticalException
@override_settings(GOSQUARED_SITE_TOKEN='ABC-123456-D')
class GoSquaredTagTestCase(TagTestCase):
"""
Tests for the ``gosquared`` template tag.
"""
def test_tag(self):
r = self.render_tag('gosquared', 'gosquared')
self.assertTrue('GoSquared.acct = "ABC-123456-D";' in r, r)
def test_node(self):
r = GoSquaredNode().render(Context({}))
self.assertTrue('GoSquared.acct = "ABC-123456-D";' in r, r)
@override_settings(GOSQUARED_SITE_TOKEN=None)
def test_no_token(self):
self.assertRaises(AnalyticalException, GoSquaredNode)
@override_settings(GOSQUARED_SITE_TOKEN='this is not a token')
def test_wrong_token(self):
self.assertRaises(AnalyticalException, GoSquaredNode)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_auto_identify(self):
r = GoSquaredNode().render(Context({'user': User(username='test',
first_name='Test', last_name='User')}))
self.assertTrue('GoSquared.UserName = "Test User";' in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_manual_identify(self):
r = GoSquaredNode().render(Context({
'user': User(username='test', first_name='Test', last_name='User'),
'gosquared_identity': 'test_identity',
}))
self.assertTrue('GoSquared.UserName = "test_identity";' in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_identify_anonymous_user(self):
r = GoSquaredNode().render(Context({'user': AnonymousUser()}))
self.assertFalse('GoSquared.UserName = ' in r, r)
@override_settings(ANALYTICAL_INTERNAL_IPS=['1.1.1.1'])
def test_render_internal_ip(self):
req = HttpRequest()
req.META['REMOTE_ADDR'] = '1.1.1.1'
context = Context({'request': req})
r = GoSquaredNode().render(context)
self.assertTrue(r.startswith(
'<!-- GoSquared disabled on internal IP address'), r)
self.assertTrue(r.endswith('-->'), r)
| """
Tests for the GoSquared template tags and filters.
"""
from django.contrib.auth.models import User, AnonymousUser
from django.http import HttpRequest
from django.template import Context
from django.test.utils import override_settings
from analytical.templatetags.gosquared import GoSquaredNode
from analytical.tests.utils import TagTestCase
from analytical.utils import AnalyticalException
@override_settings(GOSQUARED_SITE_TOKEN='ABC-123456-D')
class GoSquaredTagTestCase(TagTestCase):
"""
Tests for the ``gosquared`` template tag.
"""
def test_tag(self):
r = self.render_tag('gosquared', 'gosquared')
self.assertTrue('GoSquared.acct = "ABC-123456-D";' in r, r)
def test_node(self):
r = GoSquaredNode().render(Context({}))
self.assertTrue('GoSquared.acct = "ABC-123456-D";' in r, r)
@override_settings(GOSQUARED_SITE_TOKEN=None)
def test_no_token(self):
self.assertRaises(AnalyticalException, GoSquaredNode)
@override_settings(GOSQUARED_SITE_TOKEN='this is not a token')
def test_wrong_token(self):
self.assertRaises(AnalyticalException, GoSquaredNode)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_auto_identify(self):
r = GoSquaredNode().render(Context({'user': User(username='test',
first_name='Test', last_name='User')}))
self.assertTrue('GoSquared.UserName = "Test User";' in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
def test_manual_identify(self):
r = GoSquaredNode().render(Context({
'user': User(username='test', first_name='Test', last_name='User'),
'gosquared_identity': 'test_identity',
}))
self.assertTrue('GoSquared.UserName = "test_identity";' in r, r)
@override_settings(ANALYTICAL_AUTO_IDENTIFY=True, USER_ID=None)
def test_identify_anonymous_user(self):
r = GoSquaredNode().render(Context({'user': AnonymousUser()}))
self.assertFalse('GoSquared.UserName = ' in r, r)
@override_settings(ANALYTICAL_INTERNAL_IPS=['1.1.1.1'])
def test_render_internal_ip(self):
req = HttpRequest()
req.META['REMOTE_ADDR'] = '1.1.1.1'
context = Context({'request': req})
r = GoSquaredNode().render(context)
self.assertTrue(r.startswith(
'<!-- GoSquared disabled on internal IP address'), r)
self.assertTrue(r.endswith('-->'), r)
| mit | Python |
8713f44fbd35f012ac7e01a64cffcfdf846fee9f | Remove a relative import that escaped test.test_importlib. | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | Lib/test/test_importlib/__init__.py | Lib/test/test_importlib/__init__.py | import os
import sys
from test import support
import unittest
def test_suite(package=__package__, directory=os.path.dirname(__file__)):
suite = unittest.TestSuite()
for name in os.listdir(directory):
if name.startswith(('.', '__')):
continue
path = os.path.join(directory, name)
if (os.path.isfile(path) and name.startswith('test_') and
name.endswith('.py')):
submodule_name = os.path.splitext(name)[0]
module_name = "{0}.{1}".format(package, submodule_name)
__import__(module_name, level=0)
module_tests = unittest.findTestCases(sys.modules[module_name])
suite.addTest(module_tests)
elif os.path.isdir(path):
package_name = "{0}.{1}".format(package, name)
__import__(package_name, level=0)
package_tests = getattr(sys.modules[package_name], 'test_suite')()
suite.addTest(package_tests)
else:
continue
return suite
def test_main():
start_dir = os.path.dirname(__file__)
top_dir = os.path.dirname(os.path.dirname(start_dir))
test_loader = unittest.TestLoader()
support.run_unittest(test_loader.discover(start_dir, top_level_dir=top_dir))
| import os
import sys
from .. import support
import unittest
def test_suite(package=__package__, directory=os.path.dirname(__file__)):
suite = unittest.TestSuite()
for name in os.listdir(directory):
if name.startswith(('.', '__')):
continue
path = os.path.join(directory, name)
if (os.path.isfile(path) and name.startswith('test_') and
name.endswith('.py')):
submodule_name = os.path.splitext(name)[0]
module_name = "{0}.{1}".format(package, submodule_name)
__import__(module_name, level=0)
module_tests = unittest.findTestCases(sys.modules[module_name])
suite.addTest(module_tests)
elif os.path.isdir(path):
package_name = "{0}.{1}".format(package, name)
__import__(package_name, level=0)
package_tests = getattr(sys.modules[package_name], 'test_suite')()
suite.addTest(package_tests)
else:
continue
return suite
def test_main():
start_dir = os.path.dirname(__file__)
top_dir = os.path.dirname(os.path.dirname(start_dir))
test_loader = unittest.TestLoader()
support.run_unittest(test_loader.discover(start_dir, top_level_dir=top_dir))
| mit | Python |
4d597a8e71c0020b0f1d36e1cca64b5a353b0643 | modify version | weifind/abepos,weifind/abepos,weifind/abepos,weifind/abepos | Abe/version.py | Abe/version.py | __version__ = '0.88'
| __version__ = '0.8pre'
| agpl-3.0 | Python |
8c7abe561cd95331fef17b4fd1c7fe67386826a2 | change the input url | elixirhub/events-portal-scraping-scripts | AddDataTest.py | AddDataTest.py | __author__ = 'chuqiao'
import EventsPortal
from datetime import datetime
import logging
def logger():
"""
Function that initialises logging system
"""
global logger
# create logger with 'syncsolr'
logger = logging.getLogger('adddata')
logger.setLevel(logging.DEBUG)
# specifies the lowest severity that will be dispatched to the appropriate destination
# create file handler which logs even debug messages
fh = logging.FileHandler('adddata.log')
# fh.setLevel(logging.WARN)
# create console handler and set level to debug
ch = logging.StreamHandler()
# StreamHandler instances send messages to streams
# ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
logger.addHandler(fh)
# EventsPortal.addDataToSolrFromUrl("http://www.elixir-europe.org:8080/events", "http://www.elixir-europe.org:8080/events");
logger()
logger.info('start at %s' % datetime.now())
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/upcoming?state=published&field_type_tid=All", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull", "http://bioevents-portal.org/events","http://localhost:8983/solr/event_portal");
EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","139.162.217.53:8983/solr/eventsportal")
logger.info('finish at %s' % datetime.now())
if __name__ == '__main__':
EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","139.162.217.53:8983/solr/eventsportal")
| __author__ = 'chuqiao'
import EventsPortal
from datetime import datetime
import logging
def logger():
"""
Function that initialises logging system
"""
global logger
# create logger with 'syncsolr'
logger = logging.getLogger('adddata')
logger.setLevel(logging.DEBUG)
# specifies the lowest severity that will be dispatched to the appropriate destination
# create file handler which logs even debug messages
fh = logging.FileHandler('adddata.log')
# fh.setLevel(logging.WARN)
# create console handler and set level to debug
ch = logging.StreamHandler()
# StreamHandler instances send messages to streams
# ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
logger.addHandler(fh)
# EventsPortal.addDataToSolrFromUrl("http://www.elixir-europe.org:8080/events", "http://www.elixir-europe.org:8080/events");
logger()
logger.info('start at %s' % datetime.now())
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/upcoming?state=published&field_type_tid=All", "http://bioevents-portal.org/events","http://139.162.217.53:8983/solr/eventsportal");
# EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull", "http://bioevents-portal.org/events","http://localhost:8983/solr/event_portal");
EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","localhost:8983/solr/event_portal")
logger.info('finish at %s' % datetime.now())
if __name__ == '__main__':
EventsPortal.addDataToSolrFromUrl("http://bioevents-portal.org/eventsfull/test?state=published&field_type_tid=All", "http://bioevents-portal.org/events","localhost:8983/solr/event_portal")
| mit | Python |
d98cdb7eae40b5bb11b5d1fc0eacc35ef6bf310d | Order filter for report page | pythonindia/wye,pythonindia/wye,pythonindia/wye,pythonindia/wye | wye/reports/views.py | wye/reports/views.py | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from wye.organisations.models import Organisation
from wye.workshops.models import Workshop
from wye.profiles.models import Profile
import datetime
from wye.base.constants import WorkshopStatus
@login_required
def index(request, days):
print(request.user.is_staff)
if not request.user.is_staff:
return ""
d = datetime.datetime.now() - datetime.timedelta(days=int(days))
organisations = Organisation.objects.filter(
active=True).filter(created_at__gte=d)
workshops = Workshop.objects.filter(
is_active=True).filter(
expected_date__gte=d).filter(
expected_date__lt=datetime.datetime.now()).filter(
status__in=[WorkshopStatus.COMPLETED,
WorkshopStatus.FEEDBACK_PENDING]).order_by('expected_date')
profiles = Profile.objects.filter(user__date_joined__gte=d)
no_of_participants = sum([w.no_of_participants for w in workshops])
template_name = 'reports/index.html'
context_dict = {}
context_dict['organisations'] = organisations
context_dict['workshops'] = workshops
context_dict['profiles'] = profiles
context_dict['no_of_participants'] = no_of_participants
context_dict['date'] = d
workshops = Workshop.objects.filter(
is_active=True)
return render(request, template_name, context_dict)
| from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from wye.organisations.models import Organisation
from wye.workshops.models import Workshop
from wye.profiles.models import Profile
import datetime
from wye.base.constants import WorkshopStatus
@login_required
def index(request, days):
print(request.user.is_staff)
if not request.user.is_staff:
return ""
d = datetime.datetime.now() - datetime.timedelta(days=int(days))
organisations = Organisation.objects.filter(
active=True).filter(created_at__gte=d)
workshops = Workshop.objects.filter(
is_active=True).filter(
expected_date__gte=d).filter(
expected_date__lt=datetime.datetime.now()).filter(
status__in=[WorkshopStatus.COMPLETED,
WorkshopStatus.FEEDBACK_PENDING])
profiles = Profile.objects.filter(user__date_joined__gte=d)
no_of_participants = sum([w.no_of_participants for w in workshops])
template_name = 'reports/index.html'
context_dict = {}
context_dict['organisations'] = organisations
context_dict['workshops'] = workshops
context_dict['profiles'] = profiles
context_dict['no_of_participants'] = no_of_participants
context_dict['date'] = d
workshops = Workshop.objects.filter(
is_active=True)
return render(request, template_name, context_dict)
| mit | Python |
42442f234e6e364a52acbcb788fbeda6c6ffcd8c | Adjust tests for urls. | jakeharding/repo-health,jakeharding/repo-health,jakeharding/repo-health,jakeharding/repo-health | repo_health/index/tests.py | repo_health/index/tests.py | """
Test the index view is being accessed properly.
"""
from django.test import TestCase, Client
from django.urls import reverse
class TestIndexView(TestCase):
client = None
def setUp(self):
self.client = Client()
def test_view_init(self):
"""Test the view can be accessed from the url."""
res = self.client.get("")
self.assertEquals(res.status_code, 302)
redirect = res._headers['location'][1]
self.assertTrue('app' in redirect)
res = self.client.get(redirect)
self.assertEqual(res.status_code, 200)
| """
Test the index view is being accessed properly.
"""
from django.test import TestCase, Client
class TestIndexView(TestCase):
client = None
def setUp(self):
self.client = Client()
def test_view_init(self):
"""Test the view can be accessed from the url.'''
res = self.client.get("")
self.assertEquals(res.status_code, 200)
#Python 3 changed the way strings and bytes work. The `b` here converts the string to bytes.
self.assertTrue(b"Hello World" in res.content)
| mit | Python |
10f7938e37180c0cb3b701223cf6d1855e7d8f93 | Drop python_2_unicode_compatible for Settings, fix docs build on rtfd | watchdogpolska/watchdog-kj-kultura,watchdogpolska/watchdog-kj-kultura,watchdogpolska/watchdog-kj-kultura | watchdog_kj_kultura/main/models.py | watchdog_kj_kultura/main/models.py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
from tinymce.models import HTMLField
from django.contrib.sites.models import Site
class SettingsQuerySet(models.QuerySet):
pass
class Settings(TimeStampedModel):
site = models.OneToOneField(Site, verbose_name=_("Site"))
home_content = HTMLField(verbose_name=_("Content of home page"))
objects = SettingsQuerySet.as_manager()
class Meta:
verbose_name = _("Settings")
verbose_name_plural = _("Settings")
ordering = ['created', ]
| from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from model_utils.models import TimeStampedModel
from tinymce.models import HTMLField
from django.contrib.sites.models import Site
class SettingsQuerySet(models.QuerySet):
pass
@python_2_unicode_compatible
class Settings(TimeStampedModel):
site = models.OneToOneField(Site, verbose_name=_("Site"))
home_content = HTMLField(verbose_name=_("Content of home page"))
objects = SettingsQuerySet.as_manager()
class Meta:
verbose_name = _("Settings")
verbose_name_plural = _("Settings")
ordering = ['created', ]
| mit | Python |
5f8580e9d28d08e13f40692a2247f41ea8c5b4b9 | Remove extra newline. | jakirkham/nanshe,jakirkham/nanshe,DudLab/nanshe,DudLab/nanshe,nanshe-org/nanshe,nanshe-org/nanshe | tests/test_additional_io.py | tests/test_additional_io.py | __author__ = "John Kirkham <[email protected]>"
__date__ = "$Jul 28, 2014 11:50:37 EDT$"
import nanshe.nanshe.additional_io
class TestAdditionalIO(object):
num_files = 10
def setup(self):
import tempfile
self.temp_dir = tempfile.mkdtemp()
self.temp_files = []
for i in xrange(TestAdditionalIO.num_files):
self.temp_files.append(tempfile.NamedTemporaryFile(suffix = ".tif", dir = self.temp_dir))
self.temp_files.sort(cmp = lambda a, b: 2*(a.name > b.name) - 1)
def test_expand_pathname_list(self):
import itertools
matched_filenames = nanshe.nanshe.additional_io.expand_pathname_list(self.temp_dir + "/*.tif")
matched_filenames.sort(cmp = lambda a, b: 2*(a > b) - 1)
assert(len(matched_filenames) == len(self.temp_files))
for each_l, each_f in itertools.izip(matched_filenames, self.temp_files):
assert(each_l == each_f.name)
def teardown(self):
import shutil
for i in xrange(len(self.temp_files)):
self.temp_files[i].close()
self.temp_files = []
shutil.rmtree(self.temp_dir)
self.temp_dir = ""
| __author__ = "John Kirkham <[email protected]>"
__date__ = "$Jul 28, 2014 11:50:37 EDT$"
import nanshe.nanshe.additional_io
class TestAdditionalIO(object):
num_files = 10
def setup(self):
import tempfile
self.temp_dir = tempfile.mkdtemp()
self.temp_files = []
for i in xrange(TestAdditionalIO.num_files):
self.temp_files.append(tempfile.NamedTemporaryFile(suffix = ".tif", dir = self.temp_dir))
self.temp_files.sort(cmp = lambda a, b: 2*(a.name > b.name) - 1)
def test_expand_pathname_list(self):
import itertools
matched_filenames = nanshe.nanshe.additional_io.expand_pathname_list(self.temp_dir + "/*.tif")
matched_filenames.sort(cmp = lambda a, b: 2*(a > b) - 1)
assert(len(matched_filenames) == len(self.temp_files))
for each_l, each_f in itertools.izip(matched_filenames, self.temp_files):
assert(each_l == each_f.name)
def teardown(self):
import shutil
for i in xrange(len(self.temp_files)):
self.temp_files[i].close()
self.temp_files = []
shutil.rmtree(self.temp_dir)
self.temp_dir = ""
| bsd-3-clause | Python |
f356adde3cb4776ad0a34b47be54a6e14972ce17 | Improve Python 3.x compatibility | varunarya10/oslo.utils,magic0704/oslo.utils,openstack/oslo.utils,dims/oslo.utils | tests/unit/test_excutils.py | tests/unit/test_excutils.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.common import excutils
from tests import utils
class SaveAndReraiseTest(utils.BaseTestCase):
def test_save_and_reraise_exception(self):
e = None
msg = 'foo'
try:
try:
raise Exception(msg)
except:
with excutils.save_and_reraise_exception():
pass
except Exception as _e:
e = _e
self.assertEqual(str(e), msg)
def test_save_and_reraise_exception_dropped(self):
e = None
msg = 'second exception'
try:
try:
raise Exception('dropped')
except:
with excutils.save_and_reraise_exception():
raise Exception(msg)
except Exception as _e:
e = _e
self.assertEqual(str(e), msg)
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.common import excutils
from tests import utils
class SaveAndReraiseTest(utils.BaseTestCase):
def test_save_and_reraise_exception(self):
e = None
msg = 'foo'
try:
try:
raise Exception(msg)
except:
with excutils.save_and_reraise_exception():
pass
except Exception, _e:
e = _e
self.assertEqual(str(e), msg)
def test_save_and_reraise_exception_dropped(self):
e = None
msg = 'second exception'
try:
try:
raise Exception('dropped')
except:
with excutils.save_and_reraise_exception():
raise Exception(msg)
except Exception, _e:
e = _e
self.assertEqual(str(e), msg)
| apache-2.0 | Python |
514064ce5a0bc7d3ecab10d1b810e5b751ed79af | update supported thumbnail types | chen-chan/nntpchan,majestrate/nntpchan,chen-chan/nntpchan,majestrate/nntpchan,majestrate/nntpchan,majestrate/nntpchan,chen-chan/nntpchan,chen-chan/nntpchan,chen-chan/nntpchan,chen-chan/nntpchan,majestrate/nntpchan,chen-chan/nntpchan,chen-chan/nntpchan,majestrate/nntpchan,majestrate/nntpchan | contrib/frontends/django/nntpchan/nntpchan/thumbnail.py | contrib/frontends/django/nntpchan/nntpchan/thumbnail.py | from django.conf import settings
import subprocess
import os
img_ext = ['png', 'jpg', 'jpeg', 'gif', 'webp', 'ico']
vid_ext = ['mp4', 'webm', 'm4v', 'ogv', 'avi', 'txt']
def generate(fname, tname, placeholder):
"""
generate thumbnail
"""
ext = fname.split('.')[-1]
cmd = None
if ext in img_ext:
cmd = [settings.CONVERT_PATH, '-thumbnail', '200', fname, tname]
elif ext in vid_ext:
cmd = [settings.FFMPEG_PATH, '-i', fname, '-vf', 'scale=300:200', '-vframes', '1', tname]
if cmd is None:
os.link(placeholder, tname)
else:
subprocess.run(cmd, check=True)
| from django.conf import settings
import subprocess
import os
img_ext = ['png', 'jpg', 'jpeg', 'gif', 'webp', 'ico', 'pdf', 'ps']
vid_ext = ['mp4', 'webm', 'm4v', 'ogv', 'avi', 'txt']
def generate(fname, tname, placeholder):
"""
generate thumbnail
"""
ext = fname.split('.')[-1]
cmd = None
if ext in img_ext:
cmd = [settings.CONVERT_PATH, '-thumbnail', '200', fname, tname]
elif ext in vid_ext:
cmd = [settings.FFMPEG_PATH, '-i', fname, '-vf', 'scale=300:200', '-vframes', '1', tname]
if cmd is None:
os.link(placeholder, tname)
else:
subprocess.run(cmd, check=True)
| mit | Python |
14b473fc1b3a084a22c3e1ef37e2589d91650b2f | Add td_includes argument to allow more flexible relative include paths for td files. | jhseu/tensorflow,xzturn/tensorflow,aldian/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,yongtang/tensorflow,davidzchen/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,aldian/tensorflow,adit-chandra/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,DavidNorman/tensorflow,aam-at/tensorflow,annarev/tensorflow,jhseu/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,annarev/tensorflow,arborh/tensorflow,arborh/tensorflow,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,annarev/tensorflow,cxxgtxy/tensorflow,petewarden/tensorflow,jhseu/tensorflow,tensorflow/tensorflow,xzturn/tensorflow,sarvex/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,adit-chandra/tensorflow,gautam1858/tensorflow,chemelnucfin/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,xzturn/tensorflow,jhseu/tensorflow,chemelnucfin/tensorflow,petewarden/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-pywrap_saved_model,sarvex/tensorflow,aldian/tensorflow,jhseu/tensorflow,ppwwyyxx/tensorflow,gunan/tensorflow,arborh/tensorflow,DavidNorman/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,aam-at/tensorflow,arborh/tensorflow,frreiss/tensorflow-fred,davidzchen/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,arborh/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,annarev/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,davidzchen/tensorflow,Intel-Corporation/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,cxxgtxy/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,renyi533/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow,davidzchen/tensorflow,ppwwyyxx/tensorflow,aldian/tensorflow,adit-chandra/tensorflow,tensorflow/tensorflow,DavidNorman/tensorflow,adit-chandra/tensorflow,petewarden/tensorflow,gunan/tensorflow,sarvex/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,ppwwyyxx/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,cxxgtxy/tensorflow,adit-chandra/tensorflow,frreiss/tensorflow-fred,chemelnucfin/tensorflow,DavidNorman/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,petewarden/tensorflow,xzturn/tensorflow,yongtang/tensorflow,aldian/tensorflow,annarev/tensorflow,adit-chandra/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,chemelnucfin/tensorflow,xzturn/tensorflow,paolodedios/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,sarvex/tensorflow,DavidNorman/tensorflow,renyi533/tensorflow,tensorflow/tensorflow,xzturn/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,annarev/tensorflow,ppwwyyxx/tensorflow,aam-at/tensorflow,paolodedios/tensorflow,gunan/tensorflow,ppwwyyxx/tensorflow,Intel-tensorflow/tensorflow,aam-at/tensorflow,cxxgtxy/tensorflow,ppwwyyxx/tensorflow,davidzchen/tensorflow,chemelnucfin/tensorflow,DavidNorman/tensorflow,tensorflow/tensorflow,gunan/tensorflow,xzturn/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aam-at/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,aam-at/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,DavidNorman/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,arborh/tensorflow,gunan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,adit-chandra/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,freedomtan/tensorflow,petewarden/tensorflow,renyi533/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,chemelnucfin/tensorflow,jhseu/tensorflow,jhseu/tensorflow,annarev/tensorflow,karllessard/tensorflow,xzturn/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,xzturn/tensorflow,arborh/tensorflow,sarvex/tensorflow,cxxgtxy/tensorflow,aam-at/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,aam-at/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,annarev/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,jhseu/tensorflow,renyi533/tensorflow,gunan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,DavidNorman/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,gunan/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,ppwwyyxx/tensorflow,renyi533/tensorflow,aam-at/tensorflow,karllessard/tensorflow,davidzchen/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,jhseu/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,renyi533/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,paolodedios/tensorflow,jhseu/tensorflow,renyi533/tensorflow,xzturn/tensorflow,Intel-tensorflow/tensorflow,chemelnucfin/tensorflow,ppwwyyxx/tensorflow,sarvex/tensorflow,davidzchen/tensorflow,arborh/tensorflow,adit-chandra/tensorflow,gunan/tensorflow,renyi533/tensorflow,frreiss/tensorflow-fred,DavidNorman/tensorflow,aam-at/tensorflow,jhseu/tensorflow,ppwwyyxx/tensorflow,cxxgtxy/tensorflow,adit-chandra/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,freedomtan/tensorflow,aldian/tensorflow,freedomtan/tensorflow,arborh/tensorflow,gunan/tensorflow,Intel-tensorflow/tensorflow,chemelnucfin/tensorflow,davidzchen/tensorflow,chemelnucfin/tensorflow,arborh/tensorflow,tensorflow/tensorflow,aldian/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,karllessard/tensorflow,freedomtan/tensorflow,arborh/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,xzturn/tensorflow,frreiss/tensorflow-fred,freedomtan/tensorflow,adit-chandra/tensorflow,renyi533/tensorflow,frreiss/tensorflow-fred,adit-chandra/tensorflow,petewarden/tensorflow,paolodedios/tensorflow,ppwwyyxx/tensorflow,jhseu/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,annarev/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,renyi533/tensorflow,frreiss/tensorflow-fred,xzturn/tensorflow,tensorflow/tensorflow-pywrap_saved_model,sarvex/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,ppwwyyxx/tensorflow,adit-chandra/tensorflow,DavidNorman/tensorflow,freedomtan/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_saved_model | third_party/mlir/tblgen.bzl | third_party/mlir/tblgen.bzl | """BUILD extensions for MLIR table generation."""
def gentbl(name, tblgen, td_file, tbl_outs, td_srcs = [], td_includes = [], strip_include_prefix = None):
"""gentbl() generates tabular code from a table definition file.
Args:
name: The name of the build rule for use in dependencies.
tblgen: The binary used to produce the output.
td_file: The primary table definitions file.
tbl_outs: A list of tuples (opts, out), where each opts is a string of
options passed to tblgen, and the out is the corresponding output file
produced.
td_srcs: A list of table definition files included transitively.
td_includes: A list of include paths for relative includes.
strip_include_prefix: attribute to pass through to cc_library.
"""
srcs = []
srcs += td_srcs
if td_file not in td_srcs:
srcs += [td_file]
# Add google_mlir/include directory as include so derived op td files can
# import relative to that.
td_includes_str = "-I external/local_config_mlir/include -I external/org_tensorflow "
for td_include in td_includes:
td_includes_str += "-I %s " % td_include
td_includes_str += "-I $$(dirname $(location %s)) " % td_file
for (opts, out) in tbl_outs:
rule_suffix = "_".join(opts.replace("-", "_").replace("=", "_").split(" "))
native.genrule(
name = "%s_%s_genrule" % (name, rule_suffix),
srcs = srcs,
outs = [out],
tools = [tblgen],
message = "Generating code from table: %s" % td_file,
cmd = (("$(location %s) %s %s $(location %s) -o $@") % (
tblgen,
td_includes_str,
opts,
td_file,
)),
)
# List of opts that do not generate cc files.
skip_opts = ["-gen-op-doc"]
hdrs = [f for (opts, f) in tbl_outs if opts not in skip_opts]
native.cc_library(
name = name,
# include_prefix does not apply to textual_hdrs.
hdrs = hdrs if strip_include_prefix else [],
strip_include_prefix = strip_include_prefix,
textual_hdrs = hdrs,
)
| """BUILD extensions for MLIR table generation."""
def gentbl(name, tblgen, td_file, tbl_outs, td_srcs = [], strip_include_prefix = None):
"""gentbl() generates tabular code from a table definition file.
Args:
name: The name of the build rule for use in dependencies.
tblgen: The binary used to produce the output.
td_file: The primary table definitions file.
tbl_outs: A list of tuples (opts, out), where each opts is a string of
options passed to tblgen, and the out is the corresponding output file
produced.
td_srcs: A list of table definition files included transitively.
strip_include_prefix: attribute to pass through to cc_library.
"""
srcs = []
srcs += td_srcs
if td_file not in td_srcs:
srcs += [td_file]
# Add google_mlir/include directory as include so derived op td files can
# import relative to that.
td_includes = "-I external/local_config_mlir/include -I external/org_tensorflow "
td_includes += "-I $$(dirname $(location %s)) " % td_file
for (opts, out) in tbl_outs:
rule_suffix = "_".join(opts.replace("-", "_").replace("=", "_").split(" "))
native.genrule(
name = "%s_%s_genrule" % (name, rule_suffix),
srcs = srcs,
outs = [out],
tools = [tblgen],
message = "Generating code from table: %s" % td_file,
cmd = (("$(location %s) %s %s $(location %s) -o $@") % (
tblgen,
td_includes,
opts,
td_file,
)),
)
# List of opts that do not generate cc files.
skip_opts = ["-gen-op-doc"]
hdrs = [f for (opts, f) in tbl_outs if opts not in skip_opts]
native.cc_library(
name = name,
# include_prefix does not apply to textual_hdrs.
hdrs = hdrs if strip_include_prefix else [],
strip_include_prefix = strip_include_prefix,
textual_hdrs = hdrs,
)
| apache-2.0 | Python |
45b778c637d263208699a16ba926f0da10d5b0f4 | Fix incorrect behaviour with check.py | JuhaniImberg/tmc.py,JuhaniImberg/tmc.py | tmc/exercise_tests/check.py | tmc/exercise_tests/check.py | import re
import xml.etree.ElementTree as ET
from os import path
from tmc.exercise_tests.basetest import BaseTest, TestResult
class CheckTest(BaseTest):
def __init__(self):
super().__init__("Check")
def applies_to(self, exercise):
return path.isfile(path.join(exercise.path(), "Makefile"))
def test(self, exercise):
_, _, err = self.run(["make", "clean", "all", "run-test"], exercise)
ret = []
testpath = path.join(exercise.path(), "test", "tmc_test_results.xml")
if not path.isfile(testpath):
return [TestResult(success=False, message=err)]
if len(err) > 0:
ret.append(TestResult(message=err, warning=True))
xmlsrc = ""
with open(testpath) as fp:
xmlsrc = fp.read()
xmlsrc = re.sub(r"&(\s)", r"&\1", xmlsrc)
ns = "{http://check.sourceforge.net/ns}"
matchtest = ns + "test"
matchdesc = ns + "description"
matchmsg = ns + "message"
root = ET.fromstring(xmlsrc)
for test in root.iter(matchtest):
name = test.find(matchdesc).text
if test.get("result") in ["failure", "error"]:
success = False
message = test.find(matchmsg).text
message = message.replace(r"&", "&")
else:
success = True
message = ""
ret.append(TestResult(success=success, name=name, message=message))
return ret
| import re
import xml.etree.ElementTree as ET
from os import path
from tmc.exercise_tests.basetest import BaseTest, TestResult
class CheckTest(BaseTest):
def __init__(self):
super().__init__("Check")
def applies_to(self, exercise):
return path.isfile(path.join(exercise.path(), "Makefile"))
def test(self, exercise):
_, _, err = self.run(["make", "clean", "all", "run-test"], exercise)
ret = []
testpath = path.join(exercise.path(), "test", "tmc_test_results.xml")
if not path.isfile(testpath):
return [TestResult(success=False, message=err)]
if len(err) > 0:
ret.append(TestResult(message=err, warning=True))
xmlsrc = ""
with open(testpath) as fp:
xmlsrc = fp.read()
xmlsrc = re.sub(r"&(\s)", r"&\1", xmlsrc)
ns = "{http://check.sourceforge.net/ns}"
matchtest = ns + "test"
matchdesc = ns + "description"
matchmsg = ns + "message"
root = ET.fromstring(xmlsrc)
for test in root.iter(matchtest):
success = True
name = test.find(matchdesc).text
message = None
if test.get("result") == "failure":
success = False
message = test.find(matchmsg).text
message = message.replace(r"&", "&")
ret.append(TestResult(success=success, name=name, message=message))
return ret
| mit | Python |
85aa5449a040247a6156801e88857048a7db6dd5 | update revnum | ipython/ipython,ipython/ipython | IPython/Release.py | IPython/Release.py | # -*- coding: utf-8 -*-
"""Release data for the IPython project.
$Id: Release.py 2446 2007-06-14 22:30:58Z vivainio $"""
#*****************************************************************************
# Copyright (C) 2001-2006 Fernando Perez <[email protected]>
#
# Copyright (c) 2001 Janko Hauser <[email protected]> and Nathaniel Gray
# <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# For versions with substrings (like 0.6.16.svn), use an extra . to separate
# the new substring. We have to avoid using either dashes or underscores,
# because bdist_rpm does not accept dashes (an RPM) convention, and
# bdist_deb does not accept underscores (a Debian convention).
revision = '2445'
version = '0.8.2.svn.r' + revision.rstrip('M')
description = "An enhanced interactive Python shell."
long_description = \
"""
IPython provides a replacement for the interactive Python interpreter with
extra functionality.
Main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Readline based name completion.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* Configuration system with easy switching between different setups (simpler
than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs.
* Integrated access to the pdb debugger and the Python profiler.
The latest development version is always available at the IPython subversion
repository_.
.. _repository: http://ipython.scipy.org/svn/ipython/ipython/trunk#egg=ipython-dev
"""
license = 'BSD'
authors = {'Fernando' : ('Fernando Perez','[email protected]'),
'Janko' : ('Janko Hauser','[email protected]'),
'Nathan' : ('Nathaniel Gray','[email protected]'),
'Ville' : ('Ville Vainio','[email protected]')
}
url = 'http://ipython.scipy.org'
download_url = 'http://ipython.scipy.org/dist'
platforms = ['Linux','Mac OSX','Windows XP/2000/NT','Windows 95/98/ME']
keywords = ['Interactive','Interpreter','Shell']
| # -*- coding: utf-8 -*-
"""Release data for the IPython project.
$Id: Release.py 2409 2007-05-28 18:45:23Z vivainio $"""
#*****************************************************************************
# Copyright (C) 2001-2006 Fernando Perez <[email protected]>
#
# Copyright (c) 2001 Janko Hauser <[email protected]> and Nathaniel Gray
# <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# For versions with substrings (like 0.6.16.svn), use an extra . to separate
# the new substring. We have to avoid using either dashes or underscores,
# because bdist_rpm does not accept dashes (an RPM) convention, and
# bdist_deb does not accept underscores (a Debian convention).
revision = '2408'
version = '0.8.2.svn.r' + revision.rstrip('M')
description = "An enhanced interactive Python shell."
long_description = \
"""
IPython provides a replacement for the interactive Python interpreter with
extra functionality.
Main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Readline based name completion.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* Configuration system with easy switching between different setups (simpler
than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs.
* Integrated access to the pdb debugger and the Python profiler.
The latest development version is always available at the IPython subversion
repository_.
.. _repository: http://ipython.scipy.org/svn/ipython/ipython/trunk#egg=ipython-dev
"""
license = 'BSD'
authors = {'Fernando' : ('Fernando Perez','[email protected]'),
           'Janko' : ('Janko Hauser','[email protected]'),
           'Nathan' : ('Nathaniel Gray','[email protected]'),
           'Ville' : ('Ville Vainio','[email protected]')
           }
url = 'http://ipython.scipy.org'
download_url = 'http://ipython.scipy.org/dist'
platforms = ['Linux','Mac OSX','Windows XP/2000/NT','Windows 95/98/ME']
keywords = ['Interactive','Interpreter','Shell']
| bsd-3-clause | Python |
5e409ec1d8d53cd3005022ff090043a9e5f5cb31 | Update nyan.py | Rayvenden/python-utilities | NyanCheck/nyan.py | NyanCheck/nyan.py | #!/usr/bin/python3
from gi.repository import Gtk
from gi.repository import GObject
import webbrowser
import urllib.request
import re
def getNyan():
    USER_AGENT = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36"
    r = urllib.request.Request("http://nyanyan.it/", headers={'User-Agent': USER_AGENT, 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8'})
    data = urllib.request.urlopen(r)
    data = data.read()
    found = re.findall( '<div class="tytul">.*<div class="stronicowanieD" style="width:700px;margin-left:20px">', str(data) )
    return found[0]

class nyanIcon:
    def __init__( self ):
        self.site = getNyan()
        self.trayicon = Gtk.StatusIcon()
        self.trayicon.set_from_file( "normal.png" )
        self.trayicon.set_visible( True )
        self.trayicon.connect( "activate", self.openNyan )
        self.trayicon.connect( "popup-menu", self.options )
        GObject.timeout_add( 5000, self.checkNyan )
        Gtk.main()

    def options( self, icon, button, time ):
        self.menu = Gtk.Menu()
        exit = Gtk.MenuItem()
        exit.set_label( "Exit" )
        exit.connect( "activate", Gtk.main_quit )
        self.menu.append( exit )
        self.menu.show_all()
        def pos( menu, icon):
            return (Gtk.StatusIcon.position_menu(menu, icon))
        self.menu.popup(None, None, pos, self.trayicon, button, time)

    def checkNyan( self, *args ):
        tempsite = getNyan()
        if tempsite != self.site:
            self.site = tempsite
            self.trayicon.set_from_file( "new.png" )
        GObject.timeout_add( 60000*5, self.checkNyan )

    def openNyan( self, *args ):
        self.trayicon.set_from_file( "normal.png" )
        webbrowser.open( "http://nyanyan.it/" )

app = nyanIcon()
| #!/usr/bin/python3
from gi.repository import Gtk
from gi.repository import GObject
import webbrowser
import urllib.request
import re
def getNyan():
    USER_AGENT = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36"
    r = urllib.request.Request("http://nyanyan.it/", headers={'User-Agent': USER_AGENT, 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8'})
    data = urllib.request.urlopen(r)
    data = data.read()
    found = re.findall( '<div class="tytul">.*<div class="stronicowanieD" style="width:700px;margin-left:20px">', str(data) )
    return found[0]

class nyanIcon:
    def __init__( self ):
        self.site = getNyan()
        self.trayicon = Gtk.StatusIcon()
        self.trayicon.set_from_file( "normal.png" )
        self.trayicon.set_visible( True )
        self.trayicon.connect( "activate", self.openNyan )
        self.trayicon.connect( "popup-menu", self.options )
        GObject.timeout_add( 5000, self.checkNyan )
        Gtk.main()

    def options( self, icon, button, time ):
        self.menu = Gtk.Menu()
        exit = Gtk.MenuItem()
        exit.set_label( "Exit" )
        exit.connect( "activate", Gtk.main_quit )
        self.menu.append( exit )
        self.menu.show_all()
        def pos( menu, icon):
            return (Gtk.StatusIcon.position_menu(menu, icon))
        self.menu.popup(None, None, pos, self.trayicon, button, time)

    def checkNyan( self, *args ):
        """Checks for new posts on http://nyanyan.it/
        Takes no arguments and return true if there is new post.
        """
        tempsite = getNyan()
        if tempsite != self.site:
            self.site = tempsite
            self.trayicon.set_from_file( "new.png" )
        GObject.timeout_add( 60000*5, self.checkNyan )

    def openNyan( self, *args ):
        self.trayicon.set_from_file( "normal.png" )
        webbrowser.open( "http://nyanyan.it/" )

app = nyanIcon() | bsd-3-clause | Python |
992dc795d1f7c7ef670832a5144b7e72a9374af8 | update test_forms | project-callisto/callisto-core,SexualHealthInnovations/callisto-core,project-callisto/callisto-core,SexualHealthInnovations/django-wizard-builder,SexualHealthInnovations/django-wizard-builder,SexualHealthInnovations/callisto-core | wizard_builder/tests/test_forms.py | wizard_builder/tests/test_forms.py | from django.test import TestCase
from .. import managers
class FormSerializationTest(TestCase):
    manager = managers.FormManager
    fixtures = [
        'wizard_builder_data',
    ]
    expected_data = [{
        'descriptive_text': 'answer wisely',
        'field_id': 'question_2',
        'id': 2,
        'page': 2,
        'position': 0,
        'question_text': 'do androids dream of electric sheep?',
        'text': 'do androids dream of electric sheep?',
        'type': 'singlelinetext',
        'choices': [],
    }]

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        form = cls.manager.get_form_models()[1]
        cls.actual_data = form.serialized

    def test_same_size(self):
        actual_data = self.actual_data
        expected_data = self.expected_data
        self.assertEqual(
            len(actual_data),
            len(expected_data),
        )

    def test_same_questions(self):
        actual_data = self.actual_data
        expected_data = self.expected_data
        for index, expected_question in enumerate(expected_data):
            actual_question = actual_data[index]
            self.assertEqual(
                actual_question,
                expected_question,
            )
| from django.test import TestCase
from .. import managers
class FormSerializationTest(TestCase):
    manager = managers.FormManager
    fixtures = [
        'wizard_builder_data',
    ]
    expected_data = [{
        'descriptive_text': 'answer wisely',
        'field_id': 'question_2',
        'id': 2,
        'page': 2,
        'position': 0,
        'question_text': 'do androids dream of electric sheep?',
        'text': 'do androids dream of electric sheep?',
        'type': 'singlelinetext',
        'is_dropdown': False,
        'choices': [],
    }]

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        form = cls.manager.get_form_models()[1]
        cls.actual_data = form.serialized

    def test_same_size(self):
        actual_data = self.actual_data
        expected_data = self.expected_data
        self.assertEqual(
            len(actual_data),
            len(expected_data),
        )

    def test_same_questions(self):
        actual_data = self.actual_data
        expected_data = self.expected_data
        for index, expected_question in enumerate(expected_data):
            actual_question = actual_data[index]
            self.assertEqual(
                actual_question,
                expected_question,
            )
| agpl-3.0 | Python |
a9465bcfe387a3eb8ba730eeda5285be079044d3 | test cleanup | project-callisto/callisto-core,SexualHealthInnovations/django-wizard-builder,SexualHealthInnovations/callisto-core,SexualHealthInnovations/django-wizard-builder,project-callisto/callisto-core,SexualHealthInnovations/callisto-core,scattermagic/django-wizard-builder,scattermagic/django-wizard-builder | wizard_builder/tests/test_views.py | wizard_builder/tests/test_views.py | from unittest import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from .. import view_helpers
class ViewTest(TestCase):
    fixtures = [
        'wizard_builder_data',
    ]

    @classmethod
    def setUpClass(cls):
        settings.SITE_ID = 1
        super().setUpClass()

    def setUp(self):
        super().setUp()
        self.step = '1'
        self.data = {'question_2': 'aloe ipsum speakerbox'}
        self.storage_data = {self.step: self.data}

    def test_storage_receives_post_data(self):
        url = reverse('wizard_update', kwargs={'step': self.step})
        self.client.post(url, self.data)
        self.assertEqual(
            self.client.session['data'],
            self.storage_data,
        )
| from unittest import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from .. import view_helpers
class ViewTest(TestCase):
    fixtures = [
        'wizard_builder_data',
    ]

    @classmethod
    def setUpClass(cls):
        settings.SITE_ID = 1
        super().setUpClass()

    def test_storage_receives_post_data(self):
        step = '1'
        url = reverse('wizard_update', kwargs={'step': step})
        data = {'question_2': 'aloe ipsum speakerbox'}
        storage_data = {step: data}
        self.client.post(url, data)
        self.assertEqual(
            self.client.session['data'],
            storage_data,
        )
| agpl-3.0 | Python |
12b806a0c68ceb146eed3b4a9406f36e9f930ba6 | Fix bug with closing socket without creating it again. | harvitronix/rl-rc-car | rl-rc-car/sensor_client.py | rl-rc-car/sensor_client.py | """
This is used to gather our readings from the remote sensor server.
http://ilab.cs.byu.edu/python/socket/echoclient.html
"""
import socket
import numpy as np
import time
class SensorClient:
    def __init__(self, host='192.168.2.9', port=8888, size=1024):
        self.host = host
        self.port = port
        self.size = size

    def get_readings(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.host, self.port))
        readings = s.recv(self.size)
        s.close()
        # Turn our weird stringed list into an actual list.
        readings = readings.decode('utf-8')
        readings = readings[1:-1]
        readings = readings.split(', ')
        readings = [float(i) for i in readings]
        # Numpy it.
        return np.array([readings])

if __name__ == '__main__':
    # Testing it out.
    from becho import becho, bechonet

    network = bechonet.BechoNet(
        num_actions=6, num_inputs=3,
        nodes_1=256, nodes_2=256, verbose=True,
        load_weights=True,
        weights_file='saved-models/sonar-and-ir-9750.h5')
    pb = becho.ProjectBecho(
        network, num_actions=6, num_inputs=3,
        verbose=True, enable_training=False)
    sensors = SensorClient()

    while True:
        # Get the reading.
        readings = sensors.get_readings()
        print(readings)
        # Get the action.
        action = pb.get_action(readings)
        print("Doing action %d" % action)
        time.sleep(0.5)
| """
This is used to gather our readings from the remote sensor server.
http://ilab.cs.byu.edu/python/socket/echoclient.html
"""
import socket
import numpy as np
import time
class SensorClient:
    def __init__(self, host='192.168.2.9', port=8888, size=1024):
        self.host = host
        self.port = port
        self.size = size
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def get_readings(self):
        self.s.connect((self.host, self.port))
        readings = self.s.recv(self.size)
        self.s.close()
        # Turn our weird stringed list into an actual list.
        readings = readings.decode('utf-8')
        readings = readings[1:-1]
        readings = readings.split(', ')
        readings = [float(i) for i in readings]
        # Numpy it.
        return np.array([readings])

if __name__ == '__main__':
    # Testing it out.
    from becho import becho, bechonet

    network = bechonet.BechoNet(
        num_actions=6, num_inputs=3,
        nodes_1=256, nodes_2=256, verbose=True,
        load_weights=True,
        weights_file='saved-models/sonar-and-ir-9750.h5')
    pb = becho.ProjectBecho(
        network, num_actions=6, num_inputs=3,
        verbose=True, enable_training=False)
    sensors = SensorClient()

    while True:
        # Get the reading.
        readings = sensors.get_readings()
        print(readings)
        # Get the action.
        action = pb.get_action(readings)
        print("Doing action %d" % action)
        time.sleep(0.5)
| mit | Python |
a45d744a73c4ac54990854655dfec7e57df67eb4 | Add the device keyword to the array creation functions | cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy | numpy/_array_api/creation_functions.py | numpy/_array_api/creation_functions.py | def arange(start, /, *, stop=None, step=1, dtype=None, device=None):
    from .. import arange
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return arange(start, stop=stop, step=step, dtype=dtype)

def empty(shape, /, *, dtype=None, device=None):
    from .. import empty
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return empty(shape, dtype=dtype)

def empty_like(x, /, *, dtype=None, device=None):
    from .. import empty_like
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return empty_like(x, dtype=dtype)

def eye(N, /, *, M=None, k=0, dtype=None, device=None):
    from .. import eye
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return eye(N, M=M, k=k, dtype=dtype)

def full(shape, fill_value, /, *, dtype=None, device=None):
    from .. import full
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return full(shape, fill_value, dtype=dtype)

def full_like(x, fill_value, /, *, dtype=None, device=None):
    from .. import full_like
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return full_like(x, fill_value, dtype=dtype)

def linspace(start, stop, num, /, *, dtype=None, device=None, endpoint=True):
    from .. import linspace
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return linspace(start, stop, num, dtype=dtype, endpoint=endpoint)

def ones(shape, /, *, dtype=None, device=None):
    from .. import ones
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return ones(shape, dtype=dtype)

def ones_like(x, /, *, dtype=None, device=None):
    from .. import ones_like
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return ones_like(x, dtype=dtype)

def zeros(shape, /, *, dtype=None, device=None):
    from .. import zeros
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return zeros(shape, dtype=dtype)

def zeros_like(x, /, *, dtype=None, device=None):
    from .. import zeros_like
    if device is not None:
        # Note: Device support is not yet implemented on ndarray
        raise NotImplementedError("Device support is not yet implemented")
    return zeros_like(x, dtype=dtype)

__all__ = ['arange', 'empty', 'empty_like', 'eye', 'full', 'full_like', 'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like']
| def arange(start, /, *, stop=None, step=1, dtype=None):
    from .. import arange
    return arange(start, stop=stop, step=step, dtype=dtype)

def empty(shape, /, *, dtype=None):
    from .. import empty
    return empty(shape, dtype=dtype)

def empty_like(x, /, *, dtype=None):
    from .. import empty_like
    return empty_like(x, dtype=dtype)

def eye(N, /, *, M=None, k=0, dtype=None):
    from .. import eye
    return eye(N, M=M, k=k, dtype=dtype)

def full(shape, fill_value, /, *, dtype=None):
    from .. import full
    return full(shape, fill_value, dtype=dtype)

def full_like(x, fill_value, /, *, dtype=None):
    from .. import full_like
    return full_like(x, fill_value, dtype=dtype)

def linspace(start, stop, num, /, *, dtype=None, endpoint=True):
    from .. import linspace
    return linspace(start, stop, num, dtype=dtype, endpoint=endpoint)

def ones(shape, /, *, dtype=None):
    from .. import ones
    return ones(shape, dtype=dtype)

def ones_like(x, /, *, dtype=None):
    from .. import ones_like
    return ones_like(x, dtype=dtype)

def zeros(shape, /, *, dtype=None):
    from .. import zeros
    return zeros(shape, dtype=dtype)

def zeros_like(x, /, *, dtype=None):
    from .. import zeros_like
    return zeros_like(x, dtype=dtype)

__all__ = ['arange', 'empty', 'empty_like', 'eye', 'full', 'full_like', 'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like']
| mit | Python |
8dcb778c62c3c6722e2f6dabfd97f6f75c349e62 | Set celery max tasks child to 1 | puruckertom/ubertool_ecorest,quanted/ubertool_ecorest,puruckertom/ubertool_ecorest,quanted/ubertool_ecorest,quanted/ubertool_ecorest,puruckertom/ubertool_ecorest,puruckertom/ubertool_ecorest,quanted/ubertool_ecorest | celery_cgi.py | celery_cgi.py | import os
import logging
from celery import Celery
from temp_config.set_environment import DeployEnv
runtime_env = DeployEnv()
runtime_env.load_deployment_environment()
redis_server = os.environ.get('REDIS_HOSTNAME')
redis_port = os.environ.get('REDIS_PORT')
celery_tasks = [
    'hms_flask.modules.hms_controller',
    'pram_flask.tasks'
]
redis = 'redis://' + redis_server + ':' + redis_port + '/0'
logging.info("Celery connecting to redis server: " + redis)
celery = Celery('flask_qed', broker=redis, backend=redis, include=celery_tasks)
celery.conf.update(
    CELERY_ACCEPT_CONTENT=['json'],
    CELERY_TASK_SERIALIZER='json',
    CELERY_RESULT_SERIALIZER='json',
    CELERY_IGNORE_RESULT=True,
    CELERY_TRACK_STARTED=True,
    worker_max_tasks_per_child = 1,
    worker_max_memory_per_child = 50000
)
| import os
import logging
from celery import Celery
from temp_config.set_environment import DeployEnv
runtime_env = DeployEnv()
runtime_env.load_deployment_environment()
redis_server = os.environ.get('REDIS_HOSTNAME')
redis_port = os.environ.get('REDIS_PORT')
celery_tasks = [
    'hms_flask.modules.hms_controller',
    'pram_flask.tasks'
]
redis = 'redis://' + redis_server + ':' + redis_port + '/0'
logging.info("Celery connecting to redis server: " + redis)
celery = Celery('flask_qed', broker=redis, backend=redis, include=celery_tasks)
celery.conf.update(
    CELERY_ACCEPT_CONTENT=['json'],
    CELERY_TASK_SERIALIZER='json',
    CELERY_RESULT_SERIALIZER='json',
    CELERY_IGNORE_RESULT=True,
    CELERY_TRACK_STARTED=True,
    worker_max_memory_per_child = 50000
)
| unlicense | Python |
3d6fcb5c5ef05224f0129caf58507b555d17f35d | Fix indentation error in Flask | watson-developer-cloud/python-primer-companion-code,watson-developer-cloud/python-primer-companion-code,watson-developer-cloud/python-primer-companion-code,watson-developer-cloud/python-primer-companion-code | episode-2/flask/src/translation.py | episode-2/flask/src/translation.py | # -*- coding: utf-8 -*-
# Copyright 2016 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from watson_developer_cloud import LanguageTranslationV2 as LanguageTranslationService
def getTranslationService():
    return LanguageTranslationService(username='<your username key for the Watson language translation service>',
                                      password='<your password key for the service>')

def identifyLanguage(app, data):
    txt = data.encode("utf-8", "replace")
    language_translation = getTranslationService()
    langsdetected = language_translation.identify(txt)
    app.logger.info(json.dumps(langsdetected, indent=2))
    primarylang = langsdetected["languages"][0]
    retData = {key: primarylang[key] for key in ('language', 'confidence')}
    app.logger.info(json.dumps(retData, indent=2))
    return retData

def checkForTranslation(app, fromlang, tolang):
    supportedModels = []
    lt = getTranslationService()
    models = lt.list_models()
    modelList = models.get("models")
    supportedModels = [model['model_id'] for model in modelList
                       if fromlang == model['source']
                       and tolang == model['target']]
    return supportedModels

def performTranslation(app, txt, primarylang, targetlang):
    lt = getTranslationService()
    translation = lt.translate(txt, source=primarylang, target=targetlang)
    theTranslation = None
    if translation and ("translations" in translation):
        theTranslation = translation['translations'][0]['translation']
    return theTranslation
| # -*- coding: utf-8 -*-
# Copyright 2016 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from watson_developer_cloud import LanguageTranslationV2 as LanguageTranslationService
def getTranslationService():
    return LanguageTranslationService(username='<your username key for the Watson language translation service>',
                                      password='<your password key for the service>')

def identifyLanguage(app, data):
    txt = data.encode("utf-8", "replace")
    language_translation = getTranslationService()
    langsdetected = language_translation.identify(txt)
    app.logger.info(json.dumps(langsdetected, indent=2))
    primarylang = langsdetected["languages"][0]
    retData = {key: primarylang[key] for key in ('language', 'confidence')}
    app.logger.info(json.dumps(retData, indent=2))
    return retData

def checkForTranslation(app, fromlang, tolang):
    supportedModels = []
    lt = getTranslationService()
    models = lt.list_models()
    modelList = models.get("models")
    supportedModels = [model['model_id'] for model in modelList
                       if fromlang == model['source']
                       and tolang == model['target']]
    return supportedModels

def performTranslation(app, txt, primarylang, targetlang):
    lt = getTranslationService()
    translation = lt.translate(txt, source=primarylang, target=targetlang)
    theTranslation = None
    if translation and ("translations" in translation):
        theTranslation = translation['translations'][0]['translation']
    return theTranslation
| apache-2.0 | Python |
4026d575cac94d98f8fa5467674020b18442359d | Update h-index.py | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,jaredkoontz/leetcode,kamyu104/LeetCode,githubutilities/LeetCode,kamyu104/LeetCode,githubutilities/LeetCode,kamyu104/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode,kamyu104/LeetCode,jaredkoontz/leetcode,githubutilities/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode | Python/h-index.py | Python/h-index.py | # Time: O(nlogn)
# Space: O(1)
# Given an array of citations (each citation is a non-negative integer)
# of a researcher, write a function to compute the researcher's h-index.
#
# According to the definition of h-index on Wikipedia:
# "A scientist has index h if h of his/her N papers have
# at least h citations each, and the other N − h papers have
# no more than h citations each."
#
# For example, given citations = [3, 0, 6, 1, 5],
# which means the researcher has 5 papers in total
# and each of them had received 3, 0, 6, 1, 5 citations respectively.
# Since the researcher has 3 papers with at least 3 citations each and
# the remaining two with no more than 3 citations each, his h-index is 3.
#
# Note: If there are several possible values for h, the maximum one is taken as the h-index.
#
class Solution(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int
        """
        citations.sort(reverse=True)
        h = 0
        for i, x in enumerate(citations):
            if x >= i + 1:
                h += 1
            else:
                break
        return h

# Time: O(nlogn)
# Space: O(n)
class Solution2(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int
        """
        return sum(1 if x >= i + 1 else 0 for i, x in enumerate(sorted(citations, reverse=True)))
| # Time: O(nlogn)
# Space: O(1)
# Given an array of citations (each citation is a non-negative integer)
# of a researcher, write a function to compute the researcher's h-index.
#
# According to the definition of h-index on Wikipedia:
# "A scientist has index h if h of his/her N papers have
# at least h citations each, and the other N − h papers have
# no more than h citations each."
#
# For example, given citations = [3, 0, 6, 1, 5],
# which means the researcher has 5 papers in total
# and each of them had received 3, 0, 6, 1, 5 citations respectively.
# Since the researcher has 3 papers with at least 3 citations each and
# the remaining two with no more than 3 citations each, his h-index is 3.
#
# Note: If there are several possible values for h, the maximum one is taken as the h-index.
#
class Solution(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int
        """
        citations.sort(reverse=True)
        h = 0
        for i, x in enumerate(citations):
            if x >= i + 1:
                h += 1
            else:
                break
        return h

# Time: O(nlogn)
# Space: O(n)
class Solution2(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int
        """
        sorted(citations, reverse=True)
        h = 0
        return sum(1 if x >= i + 1 else 0 for i, x in enumerate(sorted(citations, reverse=True)))
| mit | Python |
8deb3e45511950cc1a5d317f79f30bf59ed4821a | Update Changedate | ricaportela/convert-data-nbf,ricaportela/convert-data-nbf | changedate.py | changedate.py | """ Calcular Data a partir de uma quantidade de minutos """
def alterar_data(data_ent, op, minutos_ent):
    """ Calcular nova data """
    spl_Data_ent, spl_Hora_ent = data_ent.split(" ", 2)
    spl_Dia_ent, spl_Mes_ent, spl_Ano_ent = spl_Data_ent.split("/", 3)
    spl_Hora_ent, spl_Minu_ent = spl_Hora_ent.split(":", 2)

    # transformar tudo em minutos
    # converter horas em minutos totais
    Minutos_Totais = (int(spl_Hora_ent) * 60) + int(spl_Minu_ent) + minutos_ent
    print("Total de Minutos ", Minutos_Totais)

    # 5415 / 60 minutos = 90.25 => separar inteiro de casas decimais 0.25 * 60 = 15
    # HORAS_CONV_MINUTOS = MIN_TOT_E / 60
    # 90h e 15 min
    #I, D = divmod(HORAS_CONV_MINUTOS, 1)
    #RESTO_MINUTOS = D * 60
    # 90h / 24h = 3.75 => separar inteiro de casas decimais = 0.75 / 24
    #TOTAL_DIAS = QTDE_TOTAL_HORAS / 24
    #I, D = divmod(TOTAL_DIAS, 1)
    # 3d 3.75 (0.75 * 24) = 18 h
    #TOTAL_HORAS2 = D * 24
    #print(int(I), " Dias", int(TOTAL_HORAS2), " horas", int(TOTAL_MINUTOS), " minutos")

if __name__ == ("__main__"):
    alterar_data("31/12/2016 23:35","+", 4000)
| """ Calcular Data a partir de uma quantidade de minutos """
MINUTOSENTRADA = 4000
OPERADOR = "+"
DATA_E, HORA_E = "31/12/2016 23:35".split(" ", 2)
DIA_E, MES_E, ANO_E = DATA_E.split("/", 3)
HR_E, MINU_E = HORA_E.split(":", 2)
# transformar tudo em minutos
# converter horas em minutos
MIN_TOT_E = (int(HR_E) * 60) + int(MINU_E) + MINUTOSENTRADA
print("Total de Minutos ", MIN_TOT_E)
# 5415 / 60 minutos = 90.25 = .25 * 60
TOTAL_HORAS = MIN_TOT_E / 60
# 90h e 15 mine
I, D = divmod(TOTAL_HORAS, 1)
TOTAL_MINUTOS = D * 60
# 90h / 24h = 3.75 3 dias
TOTAL_DIAS = TOTAL_HORAS / 24
I, D = divmod(TOTAL_DIAS, 1)
# 3d 3.75 (0.75 * 24) = 18 h
TOTAL_HORAS2 = D * 24
print(int(I), " Dias", int(TOTAL_HORAS2), " horas", int(TOTAL_MINUTOS), " minutos")
# 3d 18h e 15min
# 4000 min / 60 min = No. de horas 66.66
# 66h e 40 min ... peguei a dízima e multipliquei por 66*60
# Então fica assim...
# 66 h / 24 h = No. de dias
# Agora pego o número de dias
# 2d 2.75 (dizima 0.75 * 24)
# 0,75 * 24 = 18 h
# 2D 18H 40M
| mit | Python |
62a2b5ab62a5c1080cdc30e3334cc62f4a51d6a9 | Make job mode API update change. | eurekaclinical/eureka-python-client | eurekaclinical/analytics/client.py | eurekaclinical/analytics/client.py | from eurekaclinical import APISession, API, Struct, construct_api_session_context_manager
class Job(Struct):
    def __init__(self):
        super(Job, self).__init__()
        self.sourceConfigId = None
        self.destinationId = None
        self.dateRangePhenotypeKey = None
        self.earliestDate = None
        self.earliestDateSide = 'START'
        self.latestDate = None
        self.latestDateSide = 'START'
        self.jobMode = 'UPDATE'
        self.prompts = None
        self.propositionIds = []
        self.name = None

class Users(API):
    def __init__(self, *args, **kwargs):
        super(Users, self).__init__('/users/', *args, **kwargs)

    def me(self):
        return self._get(self.rest_endpoint + "me")

class Phenotypes(API):
    def __init__(self, *args, **kwargs):
        super(Phenotypes, self).__init__('/phenotypes/', *args, **kwargs)

class Concepts(API):
    def __init__(self, *args, **kwargs):
        super(Concepts, self).__init__('/concepts/', *args, **kwargs)

    def get(self, key, summarize=False):
        return self._get(self.rest_endpoint + key + "?summarize=" + str(summarize))

class Jobs(API):
    def __init__(self, *args, **kwargs):
        super(Jobs, self).__init__('/jobs/', *args, **kwargs)

    def submit(self, job):
        return self._post(self.rest_endpoint, job)

class AnalyticsSession(APISession):
    def __init__(self, cas_session,
                 api_url='https://localhost:8000/eureka-webapp', verify_api_cert=True):
        super(AnalyticsSession, self).__init__(cas_session, api_url=api_url, verify_api_cert=verify_api_cert)
        self.__api_args = (cas_session, verify_api_cert, api_url)

    @property
    def users(self):
        return Users(*self.__api_args)

    @property
    def phenotypes(self):
        return Phenotypes(*self.__api_args)

    @property
    def concepts(self):
        return Concepts(*self.__api_args)

    @property
    def jobs(self):
        return Jobs(*self.__api_args)
get_session = construct_api_session_context_manager(AnalyticsSession)
| from eurekaclinical import APISession, API, Struct, construct_api_session_context_manager
class Job(Struct):
    def __init__(self):
        super(Job, self).__init__()
        self.sourceConfigId = None
        self.destinationId = None
        self.dateRangePhenotypeKey = None
        self.earliestDate = None
        self.earliestDateSide = 'START'
        self.latestDate = None
        self.latestDateSide = 'START'
        self.updateData = False
        self.prompts = None
        self.propositionIds = []
        self.name = None

class Users(API):
    def __init__(self, *args, **kwargs):
        super(Users, self).__init__('/users/', *args, **kwargs)

    def me(self):
        return self._get(self.rest_endpoint + "me")

class Phenotypes(API):
    def __init__(self, *args, **kwargs):
        super(Phenotypes, self).__init__('/phenotypes/', *args, **kwargs)

class Concepts(API):
    def __init__(self, *args, **kwargs):
        super(Concepts, self).__init__('/concepts/', *args, **kwargs)

    def get(self, key, summarize=False):
        return self._get(self.rest_endpoint + key + "?summarize=" + str(summarize))

class Jobs(API):
    def __init__(self, *args, **kwargs):
        super(Jobs, self).__init__('/jobs/', *args, **kwargs)

    def submit(self, job):
        return self._post(self.rest_endpoint, job)

class AnalyticsSession(APISession):
    def __init__(self, cas_session,
                 api_url='https://localhost:8000/eureka-webapp', verify_api_cert=True):
        super(AnalyticsSession, self).__init__(cas_session, api_url=api_url, verify_api_cert=verify_api_cert)
        self.__api_args = (cas_session, verify_api_cert, api_url)

    @property
    def users(self):
        return Users(*self.__api_args)

    @property
    def phenotypes(self):
        return Phenotypes(*self.__api_args)

    @property
    def concepts(self):
        return Concepts(*self.__api_args)

    @property
    def jobs(self):
        return Jobs(*self.__api_args)
get_session = construct_api_session_context_manager(AnalyticsSession)
| apache-2.0 | Python |
11cc0c5f8aae526eddb372fbe339f649f2c654eb | Update pattern for inline comments to allow anything after '#' | hackebrot/poyo | poyo/patterns.py | poyo/patterns.py | # -*- coding: utf-8 -*-
INDENT = r"(?P<indent>^ *)"
VARIABLE = r"(?P<variable>.+):"
VALUE = r"(?P<value>((?P<q2>['\"]).*?(?P=q2))|[^#]+?)"
NEWLINE = r"$\n"
BLANK = r" +"
INLINE_COMMENT = r"( +#.*)?"
COMMENT = r"^ *#.*" + NEWLINE
BLANK_LINE = r"^[ \t]*" + NEWLINE
SECTION = INDENT + VARIABLE + INLINE_COMMENT + NEWLINE
SIMPLE = INDENT + VARIABLE + BLANK + VALUE + INLINE_COMMENT + NEWLINE
NULL = r"null|Null|NULL|~"
TRUE = r"true|True|TRUE"
FALSE = r"false|False|FALSE"
INT = r"[-+]?[0-9]+"
FLOAT = r"([-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?)"
STR = r"(?P<quotes>['\"]?).*(?P=quotes)"
| # -*- coding: utf-8 -*-
INDENT = r"(?P<indent>^ *)"
VARIABLE = r"(?P<variable>.+):"
VALUE = r"(?P<value>((?P<q2>['\"]).*?(?P=q2))|[^#]+?)"
NEWLINE = r"$\n"
BLANK = r" +"
INLINE_COMMENT = r"( +#\w*)?"
COMMENT = r"^ *#.*" + NEWLINE
BLANK_LINE = r"^[ \t]*" + NEWLINE
SECTION = INDENT + VARIABLE + INLINE_COMMENT + NEWLINE
SIMPLE = INDENT + VARIABLE + BLANK + VALUE + INLINE_COMMENT + NEWLINE
NULL = r"null|Null|NULL|~"
TRUE = r"true|True|TRUE"
FALSE = r"false|False|FALSE"
INT = r"[-+]?[0-9]+"
FLOAT = r"([-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?)"
STR = r"(?P<quotes>['\"]?).*(?P=quotes)"
| mit | Python |
0f9b5bdba841d707e236bb8ed8df5ba4aa7806c2 | Allow a None value for os_config_path. | 13steinj/praw,13steinj/praw,voussoir/reddit,tehp/reddit,nmtake/praw,tehp/reddit,voussoir/reddit,iAmMrinal0/reddit,TacticalGoat/reddit,RGood/praw,darthkedrik/praw,gschizas/praw,tehp/reddit,leviroth/praw,TacticalGoat/reddit,darthkedrik/praw,leviroth/praw,praw-dev/praw,iAmMrinal0/reddit,RGood/praw,nmtake/praw,gschizas/praw,praw-dev/praw | praw/settings.py | praw/settings.py | # This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from praw.compat import configparser # pylint: disable-msg=E0611
def _load_configuration():
    config = configparser.RawConfigParser()
    module_dir = os.path.dirname(sys.modules[__name__].__file__)
    if 'APPDATA' in os.environ: # Windows
        os_config_path = os.environ['APPDATA']
    elif 'XDG_CONFIG_HOME' in os.environ: # Modern Linux
        os_config_path = os.environ['XDG_CONFIG_HOME']
    elif 'HOME' in os.environ: # Legacy Linux
        os_config_path = os.path.join(os.environ['HOME'], '.config')
    else:
        os_config_path = None
    locations = [os.path.join(module_dir, 'praw.ini'),
                 'praw.ini']
    if os_config_path is not None:
        locations.insert(1,os.path.join(os_config_path, 'praw.ini'))
    if not config.read(locations):
        raise Exception('Could not find config file in any of: %s' % locations)
    return config
CONFIG = _load_configuration()
| # This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from praw.compat import configparser # pylint: disable-msg=E0611
def _load_configuration():
    config = configparser.RawConfigParser()
    module_dir = os.path.dirname(sys.modules[__name__].__file__)
    if 'APPDATA' in os.environ: # Windows
        os_config_path = os.environ['APPDATA']
    elif 'XDG_CONFIG_HOME' in os.environ: # Modern Linux
        os_config_path = os.environ['XDG_CONFIG_HOME']
    else: # Legacy Linux
        os_config_path = os.path.join(os.environ['HOME'], '.config')
    locations = [os.path.join(module_dir, 'praw.ini'),
                 os.path.join(os_config_path, 'praw.ini'),
                 'praw.ini']
    if not config.read(locations):
        raise Exception('Could not find config file in any of: %s' % locations)
    return config
CONFIG = _load_configuration()
| bsd-2-clause | Python |
d84a4efcf880bb668b2721af3f4ce18220e8baab | Use np.genfromtext to handle missing values | jonathansick/xvistaprof | xvistaprof/reader.py | xvistaprof/reader.py | #!/usr/bin/env python
# encoding: utf-8
"""
Reader for XVISTA .prof tables.
"""
import numpy as np
from astropy.table import Table
from astropy.io import registry
def xvista_table_reader(filename):
    dt = [('R', np.float), ('SB', np.float), ('SB_err', np.float),
          ('ELL', np.float), ('PA', np.float), ('EMAG', np.float),
          ('ELLMAG', np.float), ('ELLMAG_err', np.float), ('XC', np.float),
          ('YC', np.float), ('FRACONT', np.float), ('A1', np.float),
          ('A2', np.float), ('A4', np.float), ('CIRCMAG', np.float)]
    data = np.genfromtxt(filename, dtype=np.dtype(dt), skiprows=15,
                         missing_values='*', filling_values=np.nan)
    return Table(data)
registry.register_reader('xvistaprof', Table, xvista_table_reader)
| #!/usr/bin/env python
# encoding: utf-8
"""
Reader for XVISTA .prof tables.
"""
import numpy as np
from astropy.table import Table
from astropy.io import registry
def xvista_table_reader(filename):
    dt = [('R', np.float), ('SB', np.float), ('SB_err', np.float),
          ('ELL', np.float), ('PA', np.float), ('EMAG', np.float),
          ('ELLMAG', np.float), ('ELLMAG_err', np.float), ('XC', np.float),
          ('YC', np.float), ('FRACONT', np.float), ('A1', np.float),
          ('A2', np.float), ('A4', np.float), ('CIRCMAG', np.float)]
    data = np.loadtxt(filename, dtype=np.dtype(dt), skiprows=15)
    return Table(data)
registry.register_reader('xvistaprof', Table, xvista_table_reader)
| bsd-2-clause | Python |
0dde9454d05a6d5533454fbac8996c560d007c67 | make a proper hook/task split in cython. | abadger/Bento,abadger/Bento,abadger/Bento,abadger/Bento,cournape/Bento,cournape/Bento,cournape/Bento,cournape/Bento | yaku/tools/cython.py | yaku/tools/cython.py | import os
import sys
from yaku.task_manager \
    import \
        extension, get_extension_hook
from yaku.task \
    import \
        Task
from yaku.compiled_fun \
    import \
        compile_fun
from yaku.utils \
    import \
        ensure_dir, find_program
import yaku.errors

@extension(".pyx")
def cython_hook(self, node):
    self.sources.append(node.change_ext(".c"))
    return cython_task(self, node)

def cython_task(self, node):
    out = node.change_ext(".c")
    target = node.parent.declare(out.name)
    ensure_dir(target.name)
    task = Task("cython", inputs=[node], outputs=[target])
    task.gen = self
    task.env_vars = []
    task.env = self.env
    self.env["CYTHON_INCPATH"] = ["-I%s" % p for p in
                                  self.env["CYTHON_CPPPATH"]]
    task.func = compile_fun("cython", "cython ${SRC} -o ${TGT} ${CYTHON_INCPATH}",
                            False)[0]
    return [task]

def configure(ctx):
    sys.stderr.write("Looking for cython... ")
    if detect(ctx):
        sys.stderr.write("yes\n")
    else:
        sys.stderr.write("no!\n")
        raise yaku.errors.ToolNotFound()
    ctx.env["CYTHON_CPPPATH"] = []

def detect(ctx):
    if find_program("cython") is None:
        return False
    else:
        return True
| import os
import sys
from yaku.task_manager \
    import \
        extension, get_extension_hook
from yaku.task \
    import \
        Task
from yaku.compiled_fun \
    import \
        compile_fun
from yaku.utils \
    import \
        ensure_dir, find_program
import yaku.errors

@extension(".pyx")
def cython_task(self, node):
    out = node.change_ext(".c")
    target = node.parent.declare(out.name)
    ensure_dir(target.name)
    task = Task("cython", inputs=[node], outputs=[target])
    task.gen = self
    task.env_vars = []
    task.env = self.env
    self.env["CYTHON_INCPATH"] = ["-I%s" % p for p in
                                  self.env["CYTHON_CPPPATH"]]
    task.func = compile_fun("cython", "cython ${SRC} -o ${TGT} ${CYTHON_INCPATH}",
                            False)[0]
    return [task]

def configure(ctx):
    sys.stderr.write("Looking for cython... ")
    if detect(ctx):
        sys.stderr.write("yes\n")
    else:
        sys.stderr.write("no!\n")
        raise yaku.errors.ToolNotFound()
    ctx.env["CYTHON_CPPPATH"] = []

def detect(ctx):
    if find_program("cython") is None:
        return False
    else:
        return True
| bsd-3-clause | Python |
af96c316f485ebed2ad342aa2ea720d8b699f649 | bump version | ArabellaTech/ydcommon,ArabellaTech/ydcommon,ArabellaTech/ydcommon | ydcommon/__init__.py | ydcommon/__init__.py | """
YD Technology common libraries
"""
VERSION = (0, 1, 2)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
def get_version():
"""
Returns shorter version (digit parts only) as string.
"""
version = '.'.join((str(each) for each in VERSION[:3]))
if len(VERSION) > 3:
version += str(VERSION[3])
return version
| """
YD Technology common libraries
"""
VERSION = (0, 1, 1)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
def get_version():
"""
Returns shorter version (digit parts only) as string.
"""
version = '.'.join((str(each) for each in VERSION[:3]))
if len(VERSION) > 3:
version += str(VERSION[3])
return version
| mit | Python |
5a31ed001626937772a30ab46b94fe2b4bb5cfb8 | allow 2013 candidates | sunlightlabs/read_FEC,sunlightlabs/read_FEC,sunlightlabs/read_FEC,sunlightlabs/read_FEC | fecreader/summary_data/management/commands/add_candidates.py | fecreader/summary_data/management/commands/add_candidates.py | from django.core.management.base import BaseCommand, CommandError
from ftpdata.models import Candidate
from summary_data.models import Candidate_Overlay
from summary_data.utils.overlay_utils import make_candidate_overlay_from_masterfile
election_year = 2014
cycle = str(election_year)
class Command(BaseCommand):
    help = "Add new candidates"
    requires_model_validation = False

    def handle(self, *args, **options):
        candidates = Candidate.objects.filter(cycle=cycle, cand_election_year__in=[2013,2014])
        # We'll miss folks who put the wrong election year in their filing, but...
        for candidate in candidates:
            # will doublecheck that it doesn't already exist before creating it
            make_candidate_overlay_from_masterfile(candidate.cand_id, election_year=candidate.cand_election_year)
| from django.core.management.base import BaseCommand, CommandError
from ftpdata.models import Candidate
from summary_data.models import Candidate_Overlay
from summary_data.utils.overlay_utils import make_candidate_overlay_from_masterfile
election_year = 2014
cycle = str(election_year)
class Command(BaseCommand):
    help = "Add new candidates"
    requires_model_validation = False

    def handle(self, *args, **options):
        candidates = Candidate.objects.filter(cycle=cycle, cand_election_year__in=[2013,2014])
        # We'll miss folks who put the wrong election year in their filing, but...
        for candidate in candidates:
            # will doublecheck that it doesn't already exist before creating it
            make_candidate_overlay_from_masterfile(candidate.cand_id, cand_election_year=candidate.cand_election_year)
| bsd-3-clause | Python |
bf4a197618bf09a164f03a53cd6998bcd6ee8196 | Fix function name | thenenadx/forseti-security,thenenadx/forseti-security,thenenadx/forseti-security | google/cloud/security/common/data_access/violation_format.py | google/cloud/security/common/data_access/violation_format.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides formatting functions for violations"""
import json
def format_violation(violation):
    """Format the policy violation data into a tuple.

    Also flattens the RuleViolation, since it consists of the resource,
    rule, and members that don't meet the rule criteria.

    Various properties of RuleViolation may also have values that exceed the
    declared column length, so truncate as necessary to prevent MySQL errors.

    Args:
        violation (namedtuple): The Policy RuleViolation. This is a named
            tumple with the following attributes 'resource_type','resource_id',
            'rule_name', 'violation_type' and 'violation_data'

    Yields:
        tuple: A tuple of the rule violation properties.
    """
    resource_type = violation.resource_type
    if resource_type:
        resource_type = resource_type[:255]
    resource_id = violation.resource_id
    if resource_id:
        resource_id = str(resource_id)[:255]
    rule_name = violation.rule_name
    if rule_name:
        rule_name = rule_name[:255]
    yield (resource_type,
           resource_id,
           rule_name,
           violation.rule_index,
           violation.violation_type,
           json.dumps(violation.violation_data))

def format_groups_violation(violation):
    """Format the groups violation data into a tuple.

    Args:
        violation (namedtuple): The groups violation. This is a named tuple
            with the following attributes 'member_email','parent.member_email',
            'violated_rule_names'

    Yields:
        tuple: A tuple of the violation properties.
    """
    member_email = violation.member_email
    if member_email:
        member_email = member_email[:255]
    group_email = violation.parent.member_email
    if group_email:
        group_email = group_email[:255]
    violated_rule_names = json.dumps(violation.violated_rule_names)
    yield (member_email,
           group_email,
           violated_rule_names)
| # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides formatting functions for violations"""
import json
def format_policy_violation(violation):
    """Format the policy violation data into a tuple.

    Also flattens the RuleViolation, since it consists of the resource,
    rule, and members that don't meet the rule criteria.

    Various properties of RuleViolation may also have values that exceed the
    declared column length, so truncate as necessary to prevent MySQL errors.

    Args:
        violation (namedtuple): The Policy RuleViolation. This is a named
            tumple with the following attributes 'resource_type','resource_id',
            'rule_name', 'violation_type' and 'violation_data'

    Yields:
        tuple: A tuple of the rule violation properties.
    """
    resource_type = violation.resource_type
    if resource_type:
        resource_type = resource_type[:255]
    resource_id = violation.resource_id
    if resource_id:
        resource_id = str(resource_id)[:255]
    rule_name = violation.rule_name
    if rule_name:
        rule_name = rule_name[:255]
    yield (resource_type,
           resource_id,
           rule_name,
           violation.rule_index,
           violation.violation_type,
           json.dumps(violation.violation_data))

def format_groups_violation(violation):
    """Format the groups violation data into a tuple.

    Args:
        violation (namedtuple): The groups violation. This is a named tuple
            with the following attributes 'member_email','parent.member_email',
            'violated_rule_names'

    Yields:
        tuple: A tuple of the violation properties.
    """
    member_email = violation.member_email
    if member_email:
        member_email = member_email[:255]
    group_email = violation.parent.member_email
    if group_email:
        group_email = group_email[:255]
    violated_rule_names = json.dumps(violation.violated_rule_names)
    yield (member_email,
           group_email,
           violated_rule_names)
| apache-2.0 | Python |
17b9ccbcf940c653c2ee0994eefec87ca2961b75 | Fix extension scraper on Python 3.x | Microsoft/PTVS,huguesv/PTVS,fivejjs/PTVS,bolabola/PTVS,huguesv/PTVS,juanyaw/PTVS,zooba/PTVS,int19h/PTVS,MetSystem/PTVS,Microsoft/PTVS,DEVSENSE/PTVS,Microsoft/PTVS,MetSystem/PTVS,ChinaQuants/PTVS,DEVSENSE/PTVS,int19h/PTVS,xNUTs/PTVS,mlorbetske/PTVS,crwilcox/PTVS,alanch-ms/PTVS,msunardi/PTVS,jkorell/PTVS,jkorell/PTVS,DinoV/PTVS,christer155/PTVS,christer155/PTVS,ChinaQuants/PTVS,int19h/PTVS,int19h/PTVS,gilbertw/PTVS,dut3062796s/PTVS,fivejjs/PTVS,gomiero/PTVS,gomiero/PTVS,fjxhkj/PTVS,Habatchii/PTVS,int19h/PTVS,huguesv/PTVS,modulexcite/PTVS,int19h/PTVS,dut3062796s/PTVS,gilbertw/PTVS,mlorbetske/PTVS,gilbertw/PTVS,gilbertw/PTVS,juanyaw/PTVS,fjxhkj/PTVS,DinoV/PTVS,modulexcite/PTVS,Habatchii/PTVS,DEVSENSE/PTVS,denfromufa/PTVS,xNUTs/PTVS,denfromufa/PTVS,zooba/PTVS,Microsoft/PTVS,Habatchii/PTVS,bolabola/PTVS,alanch-ms/PTVS,modulexcite/PTVS,zooba/PTVS,gomiero/PTVS,DEVSENSE/PTVS,juanyaw/PTVS,denfromufa/PTVS,juanyaw/PTVS,alanch-ms/PTVS,DinoV/PTVS,MetSystem/PTVS,MetSystem/PTVS,fjxhkj/PTVS,ChinaQuants/PTVS,alanch-ms/PTVS,MetSystem/PTVS,msunardi/PTVS,msunardi/PTVS,huguesv/PTVS,xNUTs/PTVS,mlorbetske/PTVS,dut3062796s/PTVS,mlorbetske/PTVS,xNUTs/PTVS,fivejjs/PTVS,huguesv/PTVS,zooba/PTVS,gomiero/PTVS,MetSystem/PTVS,gomiero/PTVS,jkorell/PTVS,Microsoft/PTVS,xNUTs/PTVS,juanyaw/PTVS,crwilcox/PTVS,fjxhkj/PTVS,dut3062796s/PTVS,fjxhkj/PTVS,gilbertw/PTVS,DEVSENSE/PTVS,ChinaQuants/PTVS,DinoV/PTVS,dut3062796s/PTVS,msunardi/PTVS,christer155/PTVS,modulexcite/PTVS,modulexcite/PTVS,alanch-ms/PTVS,Microsoft/PTVS,bolabola/PTVS,ChinaQuants/PTVS,fjxhkj/PTVS,dut3062796s/PTVS,gomiero/PTVS,Habatchii/PTVS,DinoV/PTVS,crwilcox/PTVS,jkorell/PTVS,christer155/PTVS,bolabola/PTVS,alanch-ms/PTVS,DinoV/PTVS,zooba/PTVS,DEVSENSE/PTVS,Habatchii/PTVS,crwilcox/PTVS,mlorbetske/PTVS,jkorell/PTVS,zooba/PTVS,denfromufa/PTVS,jkorell/PTVS,huguesv/PTVS,juanyaw/PTVS,mlorbetske/PTVS,christer155/PTVS,christer155/PTVS,crwilcox/PTVS,denfromufa/PTVS,bolabola/PTVS,msunardi/PTVS,Habatchii/PTVS,crwilcox/PTVS,gilbertw/PTVS,bolabola/PTVS,xNUTs/PTVS,msunardi/PTVS,denfromufa/PTVS,fivejjs/PTVS,fivejjs/PTVS,ChinaQuants/PTVS,fivejjs/PTVS,modulexcite/PTVS | Release/Product/Python/PythonTools/ExtensionScraper.py | Release/Product/Python/PythonTools/ExtensionScraper.py | # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import imp
import sys
from os import path
try:
    # disable error reporting in our process, bad extension modules can crash us, and we don't
    # want a bunch of Watson boxes popping up...
    import ctypes
    ctypes.windll.kernel32.SetErrorMode(3) # SEM_FAILCRITICALERRORS / SEM_NOGPFAULTERRORBOX
except:
    pass

# Expects either:
# scrape [filename] [output_path]
# Scrapes the file and saves the analysis to the specified filename, exits w/ nonzero exit code if anything goes wrong.
if len(sys.argv) == 4:
    if sys.argv[1] == 'scrape':
        filename = sys.argv[2]
        mod_name = path.splitext(path.basename(filename))[0]
        try:
            module = imp.load_dynamic(mod_name, filename)
        except ImportError:
            e = sys.exc_info()[1]
            print e
            sys.exit(1)

        import PythonScraper
        analysis = PythonScraper.generate_module(module)
        PythonScraper.write_analysis(sys.argv[3], analysis)
| # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import imp
import sys
from os import path
try:
    # disable error reporting in our process, bad extension modules can crash us, and we don't
    # want a bunch of Watson boxes popping up...
    import ctypes
    ctypes.windll.kernel32.SetErrorMode(3) # SEM_FAILCRITICALERRORS / SEM_NOGPFAULTERRORBOX
except:
    pass

# Expects either:
# scrape [filename] [output_path]
# Scrapes the file and saves the analysis to the specified filename, exits w/ nonzero exit code if anything goes wrong.
if len(sys.argv) == 4:
    if sys.argv[1] == 'scrape':
        filename = sys.argv[2]
        mod_name = path.splitext(path.basename(filename))[0]
        try:
            module = imp.load_dynamic(mod_name, filename)
        except ImportError, e:
            print e
            sys.exit(1)

        import PythonScraper
        analysis = PythonScraper.generate_module(module)
        PythonScraper.write_analysis(sys.argv[3], analysis)
| apache-2.0 | Python |
9780274756ef4bc2966a0f8290ca28bd3c1e8163 | update dev version after 0.31.1 tag [skip ci] | desihub/desisim,desihub/desisim | py/desisim/_version.py | py/desisim/_version.py | __version__ = '0.31.1.dev1940'
| __version__ = '0.31.1'
| bsd-3-clause | Python |
edc13c1309d550a3acc5b833d0efedaf7be4045e | Fix several off-by-one errors in split_tex_string() and add regression tests. | live-clones/pybtex | pybtex/bibtex/utils.py | pybtex/bibtex/utils.py | # Copyright (C) 2007, 2008, 2009 Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def bibtex_len(s):
    """Return the number of characters in s, taking TeX' special chars into accoount.
    """
    #FIXME stub
    return len(s)

def split_name_list(string):
    """
    Split a list of names, separated by ' and '.

    >>> split_name_list('Johnson and Peterson')
    ['Johnson', 'Peterson']
    >>> split_name_list('Armand and Peterson')
    ['Armand', 'Peterson']
    >>> split_name_list('Armand and anderssen')
    ['Armand', 'anderssen']
    >>> split_name_list('What a Strange{ }and Bizzare Name! and Peterson')
    ['What a Strange{ }and Bizzare Name!', 'Peterson']
    >>> split_name_list('What a Strange and{ }Bizzare Name! and Peterson')
    ['What a Strange and{ }Bizzare Name!', 'Peterson']
    """
    return split_tex_string(string, ' and ')

def split_tex_string(string, sep=' ', strip=True):
    """Split a string using the given separator, ignoring separators at brace level > 0.

    >>> split_tex_string('')
    []
    >>> split_tex_string('a')
    ['a']
    >>> split_tex_string('on a')
    ['on', 'a']
    """
    brace_level = 0
    name_start = 0
    result = []
    string_len = len(string)
    sep_len = len(sep)
    pos = 0
    for pos, char in enumerate(string):
        if char == '{':
            brace_level += 1
        elif char == '}':
            brace_level -= 1
        elif (
            brace_level == 0 and
            string[pos:pos + len(sep)].lower() == sep and
            pos > 0 and
            pos + len(sep) < string_len
        ):
            result.append(string[name_start:pos])
            name_start = pos + len(sep)
    if name_start < string_len:
        result.append(string[name_start:])
    if strip:
        return [part.strip() for part in result]
    else:
        return result
| # Copyright (C) 2007, 2008, 2009 Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def bibtex_len(s):
    """Return the number of characters in s, taking TeX' special chars into accoount.
    """
    #FIXME stub
    return len(s)

def split_name_list(string):
    """
    Split a list of names, separated by ' and '.

    >>> split_name_list('Johnson and Peterson')
    ['Johnson', 'Peterson']
    >>> split_name_list('Armand and Peterson')
    ['Armand', 'Peterson']
    >>> split_name_list('Armand and anderssen')
    ['Armand', 'anderssen']
    >>> split_name_list('What a Strange{ }and Bizzare Name! and Peterson')
    ['What a Strange{ }and Bizzare Name!', 'Peterson']
    >>> split_name_list('What a Strange and{ }Bizzare Name! and Peterson')
    ['What a Strange and{ }Bizzare Name!', 'Peterson']
    """
    return split_tex_string(string, ' and ')

def split_tex_string(string, sep=' ', strip=True):
    """Split a string using the given separator, ignoring separators at brace level > 0."""
    brace_level = 0
    name_start = 0
    result = []
    end = len(string) - 1
    sep_len = len(sep)
    for pos, char in enumerate(string):
        if char == '{':
            brace_level += 1
        elif char == '}':
            brace_level -= 1
        elif (
            brace_level == 0 and
            string[pos:pos + len(sep)].lower() == sep and
            pos > 0 and
            pos + len(sep) < end
        ):
            result.append(string[name_start:pos])
            name_start = pos + len(sep)
    result.append(string[name_start:])
    if strip:
        return [part.strip() for part in result]
    else:
        return result
| mit | Python |
32c9b0ead88e8ecd570f7fda1bb50808c8fb73b8 | Hide log call capture | hamzasheikh/pytest-hidecaptured | pytest_hidecaptured.py | pytest_hidecaptured.py | # -*- coding: utf-8 -*-
import pytest
@pytest.mark.tryfirst
def pytest_runtest_logreport(report):
    """Overwrite report by removing any captured stderr."""
    # print("PLUGIN SAYS -> report -> {0}".format(report))
    # print("PLUGIN SAYS -> report.sections -> {0}".format(report.sections))
    # print("PLUGIN SAYS -> dir(report) -> {0}".format(dir(report)))
    # print("PLUGIN SAYS -> type(report) -> {0}".format(type(report)))
    sections = [
        item
        for item in report.sections
        if item[0] not in (
            "Captured stdout call",
            "Captured stderr call",
            "Captured stdout setup",
            "Captured stderr setup",
            "Captured stdout teardown",
            "Captured stderr teardown",
            "Captured log call",
        )
    ]
    # print("PLUGIN SAYS -> sections -> {0}".format(sections))
    report.sections = sections
| # -*- coding: utf-8 -*-
import pytest
@pytest.mark.tryfirst
def pytest_runtest_logreport(report):
"""Overwrite report by removing any captured stderr."""
# print("PLUGIN SAYS -> report -> {0}".format(report))
# print("PLUGIN SAYS -> report.sections -> {0}".format(report.sections))
# print("PLUGIN SAYS -> dir(report) -> {0}".format(dir(report)))
# print("PLUGIN SAYS -> type(report) -> {0}".format(type(report)))
sections = [item for item in report.sections if item[0] not in ("Captured stdout call", "Captured stderr call", "Captured stdout setup", "Captured stderr setup", "Captured stdout teardown", "Captured stderr teardown")]
# print("PLUGIN SAYS -> sections -> {0}".format(sections))
report.sections = sections
| mit | Python |
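A self-contained demo of the section filtering above, with plain tuples standing in for a real pytest report (section names taken from the code, data invented):
HIDDEN = {"Captured stdout call", "Captured stderr call", "Captured log call"}
sections = [
    ("Captured stdout call", "noisy output"),
    ("Captured log call", "noisy log"),
    ("my custom section", "keep me"),
]
visible = [item for item in sections if item[0] not in HIDDEN]
assert visible == [("my custom section", "keep me")]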
a1dd37c9127501ad440c7777d14fb28b1b59b85b | Add list guests function | tomviner/dojo-adventure-game | characters.py | characters.py | from adventurelib import Item, Bag, when
class Man(Item):
subject_pronoun = 'he'
object_pronoun = 'him'
class Woman(Item):
subject_pronoun = 'she'
object_pronoun = 'her'
dr_black = the_victim = Man('Dr. Black', 'Dr Black', 'the victim')
dr_black.def_name = 'the victim'
dr_black.description = """\
Dr. Black was the much beloved host and owner of Albermore Manor. His untimely
death has come as a shock and surprise to most of tonight's guests."""
miss_scarlet = Woman('Miss Scarlet')
miss_scarlet.def_name = 'Miss Scarlet'
miss_scarlet.description = """\
Miss Scarlet is well liked by the younger gentlemen at tonight's gathering.
She is mistrusted by some and seems to have quite the salacious reputation."""
col_mustard = Man('Colonel Mustard', 'Col. Mustard', 'Col Mustard')
col_mustard.def_name = 'Colonel Mustard'
col_mustard.description = """\
The Colonel is a stern man who accepts no "nonsense". His long and esteemed
military career has left him with"""
mrs_white = Woman('Mrs. White', 'Mrs White')
mrs_white.def_name = 'Mrs. White'
rev_green = Man(
'Reverend Green', 'Rev. Green', 'Rev Green', 'Mr. Green', 'Mr Green')
rev_green.def_name = 'Reverend Green'
mrs_peacock = Woman('Mrs. Peacock', 'Mrs Peacock')
mrs_peacock.def_name = 'Mrs. Peacock'
prof_plum = Man('Professor Plum', 'Prof. Plum', 'Prof Plum')
prof_plum.def_name = 'Professor Plum'
guests = Bag([
miss_scarlet, col_mustard, mrs_white, rev_green, mrs_peacock, prof_plum
])
@when('list guests')
def list_guests():
print("A nearby guest list for tonight's gathering has the following names:")
for c in guests:
print(c)
if __name__ == '__main__':
assert prof_plum == guests.find('Prof. Plum')
assert prof_plum != guests.find('Plum')
| from adventurelib import Item, Bag
class Man(Item):
subject_pronoun = 'he'
object_pronoun = 'him'
class Woman(Item):
subject_pronoun = 'she'
object_pronoun = 'her'
dr_black = the_victim = Man('Dr. Black', 'Dr Black', 'the victim')
dr_black.def_name = 'the victim'
dr_black.description = """\
Dr. Black was the much beloved host and owner of Tudor Close. His untimely
death has come as a shock and surprise to most of tonight's guests."""
miss_scarlet = Woman('Miss Scarlet')
miss_scarlet.def_name = 'Miss Scarlet'
miss_scarlet.description = """\
Miss Scarlet is well liked by the younger gentlemen at tonight's gathering.
She is mistrusted by some and seems to have quite the salacious reputation."""
col_mustard = Man('Colonel Mustard', 'Col. Mustard', 'Col Mustard')
col_mustard.def_name = 'Colonel Mustard'
col_mustard.description = """\
The Colonel is a stern man who accepts no "nonsense". His long and esteemed
military career has left him with"""
mrs_white = Woman('Mrs. White', 'Mrs White')
mrs_white.def_name = 'Mrs. White'
rev_green = Man(
'Reverend Green', 'Rev. Green', 'Rev Green', 'Mr. Green', 'Mr Green')
rev_green.def_name = 'Reverend Green'
mrs_peacock = Woman('Mrs. Peacock', 'Mrs Peacock')
mrs_peacock.def_name = 'Mrs. Peacock'
prof_plum = Man('Professor Plum', 'Prof. Plum', 'Prof Plum')
prof_plum.def_name = 'Professor Plum'
everyone = Bag([
miss_scarlet, col_mustard, mrs_white, rev_green, mrs_peacock, prof_plum
])
if __name__ == '__main__':
assert prof_plum == everyone.find('Prof. Plum')
assert prof_plum != everyone.find('Plum')
| mit | Python |
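The asserts above rely on adventurelib matching a full alias rather than a substring; a rough stand-in for that lookup (ItemSketch and BagSketch are simplified illustrations, not the real adventurelib classes):
class ItemSketch:
    def __init__(self, *aliases):
        self.aliases = {a.lower() for a in aliases}
class BagSketch(list):
    def find(self, name):
        name = name.lower()
        return next((item for item in self if name in item.aliases), None)
plum = ItemSketch('Professor Plum', 'Prof. Plum', 'Prof Plum')
bag = BagSketch([plum])
assert bag.find('Prof. Plum') is plum
assert bag.find('Plum') is None  # a bare substring does not match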
919d67e6f46d4f991cc5caa5893beebfe94e0d9e | Add hash mock | MizukiSonoko/iroha-cli,MizukiSonoko/iroha-cli | cli/crypto.py | cli/crypto.py | from ctypes import *
import base64
import os
def generate_hex_sstr():
publicKey64 = "Not implemente"
privateKey64 = "Not implemente"
return (publicKey64,privateKey64)
def hash(msg):
return "Not implemente" | from ctypes import *
import base64
import os
def generate_hex_sstr():
publicKey64 = "Not implemente"
privateKey64 = "Not implemente"
return (publicKey64,privateKey64)
| apache-2.0 | Python |
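hash() above is still a stub; one plausible way to back it later (an assumption for illustration, not the actual Iroha scheme) is SHA3-256 via hashlib:
import hashlib
def sha3_hex(msg: bytes) -> str:
    return hashlib.sha3_256(msg).hexdigest()
assert len(sha3_hex(b"transaction")) == 64  # 32 bytes, hex-encoded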
2159a35811cac75b0c68677fc41443aa8eac6e5b | Stop conn_join from overriding channel restrictions | Heufneutje/txircd,DesertBus/txircd,ElementalAlchemist/txircd | txircd/modules/conn_join.py | txircd/modules/conn_join.py | from txircd.channel import IRCChannel
from txircd.modbase import Module
class Autojoin(Module):
def joinOnConnect(self, user):
if "client_join_on_connect" in self.ircd.servconfig:
for channel in self.ircd.servconfig["client_join_on_connect"]:
user.handleCommand("JOIN", None, [channel])
return True
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.conn_join = None
def spawn(self):
self.conn_join = Autojoin().hook(self.ircd)
return {
"actions": {
"welcome": self.conn_join.joinOnConnect
}
} | from txircd.channel import IRCChannel
from txircd.modbase import Module
class Autojoin(Module):
def joinOnConnect(self, user):
if "client_join_on_connect" in self.ircd.servconfig:
for channel in self.ircd.servconfig["client_join_on_connect"]:
user.join(self.ircd.channels[channel] if channel in self.ircd.channels else IRCChannel(self.ircd, channel))
return True
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.conn_join = None
def spawn(self):
self.conn_join = Autojoin().hook(self.ircd)
return {
"actions": {
"welcome": self.conn_join.joinOnConnect
}
} | bsd-3-clause | Python |
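The point of routing through handleCommand is that a JOIN issued that way passes the normal permission checks that a direct user.join() bypasses. A toy illustration (ToyUser and the channel-name check are invented, not txircd's API):
class ToyUser:
    def __init__(self):
        self.channels = []
    def join(self, channel):  # low-level join: no checks
        self.channels.append(channel)
    def handleCommand(self, cmd, prefix, params):
        if cmd == "JOIN" and not params[0].startswith("#invite-only"):
            self.join(params[0])  # only join once checks pass
u = ToyUser()
u.handleCommand("JOIN", None, ["#invite-only-room"])
assert u.channels == []  # the restriction is respected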
142538049bd1bf8ae92c80060435965104ec54bb | Add ability to use the pattern ((args, kwargs), callable) when specifying schema | fabianvf/scrapi,mehanig/scrapi,felliott/scrapi,erinspace/scrapi,felliott/scrapi,icereval/scrapi,mehanig/scrapi,ostwald/scrapi,alexgarciac/scrapi,erinspace/scrapi,CenterForOpenScience/scrapi,CenterForOpenScience/scrapi,fabianvf/scrapi,jeffreyliu3230/scrapi | scrapi/base/transformer.py | scrapi/base/transformer.py | from __future__ import unicode_literals
import abc
import logging
logger = logging.getLogger(__name__)
class BaseTransformer(object):
__metaclass__ = abc.ABCMeta
def transform(self, doc):
return self._transform(self.schema, doc)
def _transform(self, schema, doc):
transformed = {}
for key, value in schema.items():
if isinstance(value, dict):
transformed[key] = self._transform(value, doc)
elif isinstance(value, list) or isinstance(value, tuple):
transformed[key] = self._transform_iter(value, doc)
elif isinstance(value, basestring):
transformed[key] = self._transform_string(value, doc)
return transformed
def _transform_iter(self, l, doc):
docs = []
if isinstance(l[0], tuple) and len(l) == 2:
return self._transform_arg_kwargs(l, doc)
for value in l:
if isinstance(value, basestring):
docs.append(self._transform_string(value, doc))
elif callable(value):
return value(*[res for res in docs])
def _transform_arg_kwargs(self, l, doc):
if len(l[0]) == 1:
if isinstance(l[0][0], dict):
kwargs = l[0][0]
args = []
elif isinstance(l[0][0], tuple) or isinstance(l[0][0], list):
args = l[0][0]
kwargs = {}
else:
raise ValueError("((args, kwargs), callable) pattern not matched, {} does not define args or kwargs correctly".format(l))
else:
args = l[0][0]
kwargs = l[0][1]
fn = l[1]
return fn(
*[self._transform_string(arg, doc) for arg in args],
**{key: self._transform_string(value, doc) for key, value in kwargs.items()}
)
@abc.abstractmethod
def _transform_string(self, string, doc):
raise NotImplementedError
@abc.abstractproperty
def name(self):
raise NotImplementedError
@abc.abstractproperty
def schema(self):
raise NotImplementedError
class XMLTransformer(BaseTransformer):
__metaclass__ = abc.ABCMeta
def _transform_string(self, string, doc):
val = doc.xpath(string, namespaces=self.namespaces)
return '' if not val else unicode(val[0]) if len(val) == 1 else [unicode(v) for v in val]
@abc.abstractproperty
def namespaces(self):
raise NotImplementedError
| from __future__ import unicode_literals
import abc
import logging
logger = logging.getLogger(__name__)
class BaseTransformer(object):
__metaclass__ = abc.ABCMeta
def transform(self, doc):
return self._transform(self.schema, doc)
def _transform(self, schema, doc):
transformed = {}
for key, value in schema.items():
if isinstance(value, dict):
transformed[key] = self._transform(value, doc)
elif isinstance(value, list) or isinstance(value, tuple):
transformed[key] = self._transform_iter(value, doc)
elif isinstance(value, basestring):
transformed[key] = self._transform_string(value, doc)
return transformed
def _transform_iter(self, l, doc):
docs = []
for value in l:
if isinstance(value, basestring):
docs.append(self._transform_string(value, doc))
elif callable(value):
return value(*[res for res in docs])
@abc.abstractmethod
def _transform_string(self, string, doc):
raise NotImplementedError
@abc.abstractproperty
def name(self):
raise NotImplementedError
@abc.abstractproperty
def schema(self):
raise NotImplementedError
class XMLTransformer(BaseTransformer):
__metaclass__ = abc.ABCMeta
def _transform_string(self, string, doc):
val = doc.xpath(string, namespaces=self.namespaces)
return '' if not val else unicode(val[0]) if len(val) == 1 else [unicode(v) for v in val]
@abc.abstractproperty
def namespaces(self):
raise NotImplementedError
| apache-2.0 | Python |
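A compact demo of the new ((args, kwargs), callable) leaf, with a plain dict lookup standing in for _transform_string (the field names and lambda are invented):
def transform_leaf(leaf, doc):
    (args, kwargs), fn = leaf
    return fn(*[doc[a] for a in args],
              **{key: doc[value] for key, value in kwargs.items()})
doc = {'first': 'Ada', 'last': 'Lovelace'}
leaf = ((('first',), {'surname': 'last'}),
        lambda first, surname: '%s %s' % (first, surname))
assert transform_leaf(leaf, doc) == 'Ada Lovelace'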
465c1c1c9d7c102b4d35eb8c228565dbf8d35910 | simplify the code | uutils/coreutils,uutils/coreutils,uutils/coreutils | util/remaining-gnu-error.py | util/remaining-gnu-error.py | #!/usr/bin/env python3
# This script lists the GNU failing tests by size
# Just like with util/run-gnu-test.sh, we expect the gnu sources
# to be in ../
import urllib.request
import urllib
import os
import glob
import json
base = "../gnu/tests/"
urllib.request.urlretrieve(
"https://raw.githubusercontent.com/uutils/coreutils-tracking/main/gnu-full-result.json",
"result.json",
)
types = ("/*/*.sh", "/*/*.pl", "/*/*.xpl")
tests = []
for files in types:
tests.extend(glob.glob(base + files))
# sort by size
list_of_files = sorted(tests, key=lambda x: os.stat(x).st_size)
with open("result.json", "r") as json_file:
data = json.load(json_file)
for d in data:
for e in data[d]:
# Not all the tests are .sh files, rename them if not.
script = e.replace(".log", ".sh")
a = f"{base}{d}{script}"
if not os.path.exists(a):
a = a.replace(".sh", ".pl")
if not os.path.exists(a):
a = a.replace(".pl", ".xpl")
# the tests pass, we don't care anymore
if data[d][e] == "PASS":
list_of_files.remove(a)
# Remove the factor tests and reverse the list (bigger first)
tests = list(filter(lambda k: "factor" not in k, list_of_files))
for f in reversed(tests):
print("%s: %s" % (f, os.stat(f).st_size))
print("")
print("%s tests remaining" % len(tests))
| #!/usr/bin/env python3
# This script lists the GNU failing tests by size
# Just like with util/run-gnu-test.sh, we expect the gnu sources
# to be in ../
import urllib.request
import urllib
import os
import glob
import json
base = "../gnu/tests/"
urllib.request.urlretrieve(
"https://raw.githubusercontent.com/uutils/coreutils-tracking/main/gnu-full-result.json",
"result.json",
)
tests = glob.glob(base + "/*/*.sh")
tests_pl = glob.glob(base + "/*/*.pl")
tests_xpl = glob.glob(base + "/*/*.xpl")
tests = tests + tests_pl + tests_xpl
# sort by size
list_of_files = sorted(tests, key=lambda x: os.stat(x).st_size)
with open("result.json", "r") as json_file:
data = json.load(json_file)
for d in data:
for e in data[d]:
# Not all the tests are .sh files, rename them if not.
script = e.replace(".log", ".sh")
a = f"{base}{d}{script}"
if not os.path.exists(a):
a = a.replace(".sh", ".pl")
if not os.path.exists(a):
a = a.replace(".pl", ".xpl")
# the tests pass, we don't care anymore
if data[d][e] == "PASS":
try:
list_of_files.remove(a)
except ValueError:
# Ignore the error
pass
# Remove the factor tests and reverse the list (bigger first)
tests = list(filter(lambda k: "factor" not in k, list_of_files))
for f in reversed(tests):
print("%s: %s" % (f, os.stat(f).st_size))
print("")
print("%s tests remaining" % len(tests))
| mit | Python |
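The rewrite folds three glob patterns into one loop; the same idea in isolation (the base path is a placeholder):
import glob
import os
def collect_tests(base, patterns=("/*/*.sh", "/*/*.pl", "/*/*.xpl")):
    tests = []
    for pattern in patterns:
        tests.extend(glob.glob(base + pattern))
    return sorted(tests, key=lambda path: os.stat(path).st_size)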
b2a4967e956c07831516d90411f16d9f46a62cfb | Update script for py3 and cross-platform TMPDIR access | OpenChemistry/avogadroapp,ghutchis/avogadroapp,ghutchis/avogadroapp,ghutchis/avogadroapp,OpenChemistry/avogadroapp,OpenChemistry/avogadroapp,ghutchis/avogadroapp,OpenChemistry/avogadroapp | scripts/avogadro-remote.py | scripts/avogadro-remote.py | #!/usr/bin/python
from __future__ import print_function
import sys
import json
import time
import socket
import struct
import tempfile
class Connection:
def __init__(self, name = "avogadro"):
# create socket
self.sock = socket.socket(socket.AF_UNIX,
socket.SOCK_STREAM)
# connect
self.sock.connect(tempfile.gettempdir() + '/' + name)
def send_json(self, obj):
self.send_message(json.dumps(obj))
def send_message(self, msg):
sz = len(msg)
hdr = struct.pack('>I', sz)
pkt = hdr + msg.encode('ascii')
self.sock.send(pkt)
def recv_message(self, size = 1024):
pkt = self.sock.recv(size)
return pkt[4:]
def recv_json(self):
msg = self.recv_message()
try:
return json.loads(msg)
except Exception as e:
print('error: ' + str(e))
return {}
def close(self):
# close socket
self.sock.close()
if __name__ == '__main__':
conn = Connection()
method = sys.argv[1]
if method == 'openFile':
conn.send_json(
{
'jsonrpc' : '2.0',
'id' : 0,
'method' : 'openFile',
'params' : {
'fileName' : str(sys.argv[2])
}
}
)
elif method == 'kill':
conn.send_json(
{
'jsonrpc' : '2.0',
'id' : 0,
'method' : 'kill'
}
)
else:
print('unknown method: ' + method)
conn.close()
sys.exit(-1)
print('reply: ' + str(conn.recv_message()))
conn.close()
| #!/usr/bin/python
import sys
import json
import time
import socket
import struct
class Connection:
def __init__(self, name = "avogadro"):
# create socket
self.sock = socket.socket(socket.AF_UNIX,
socket.SOCK_STREAM)
# connect
self.sock.connect("/tmp/" + name)
def send_json(self, obj):
self.send_message(json.dumps(obj))
def send_message(self, msg):
sz = len(msg)
hdr = struct.pack('>I', sz)
pkt = hdr + msg
self.sock.send(pkt)
def recv_message(self, size = 1024):
pkt = self.sock.recv(size)
return pkt[4:]
def recv_json(self):
msg = self.recv_message()
try:
return json.loads(msg)
except Exception as e:
print 'error: ' + str(e)
return {}
def close(self):
# close socket
self.sock.close()
if __name__ == '__main__':
conn = Connection()
method = sys.argv[1]
if method == 'openFile':
conn.send_json(
{
'jsonrpc' : '2.0',
'id' : 0,
'method' : 'openFile',
'params' : {
'fileName' : str(sys.argv[2])
}
}
)
elif method == 'kill':
conn.send_json(
{
'jsonrpc' : '2.0',
'id' : 0,
'method' : 'kill'
}
)
else:
print 'unknown method: ' + method
sys.exit(-1)
conn.close()
print 'reply: ' + str(conn.recv_message())
conn.close()
| bsd-3-clause | Python |
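The 4-byte big-endian length header built by send_message and stripped by recv_message, round-tripped in memory:
import struct
def frame(msg: bytes) -> bytes:
    return struct.pack('>I', len(msg)) + msg
def unframe(pkt: bytes) -> bytes:
    (size,) = struct.unpack('>I', pkt[:4])
    return pkt[4:4 + size]
assert unframe(frame(b'{"jsonrpc": "2.0"}')) == b'{"jsonrpc": "2.0"}'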
5489fe0abc5dda3b6d41bee368cd0b9727459af3 | Add search urls for projects | Hackfmi/Diaphanum,Hackfmi/Diaphanum | projects/urls.py | projects/urls.py | from django.conf.urls import patterns, url
urlpatterns = patterns('projects.views',
url(r'^add/$', 'add_project', name='add-project'),
url(r'^edit/(?P<project_id>\d+)/$', 'edit_project', name='edit-project'),
url(r'^edit_status/(?P<project_id>\d+)/$', 'edit_status', name='edit-status'),
url(r'^archive/$', 'projects_archive', name='projects-archive'),
url(r'^archive/review/(?P<project_id>\d+)/$', 'show_project', name='show-project'),
url(r'^archive/review/versions/(?P<project_id>\d+)/$', 'show_project_versions', name='show-project-versions'),
url(r'^archive/(?P<year>\d{4})/(?P<month>\d{,2})/$', 'projects_year_month', name='projects-year-month'),
url(r'^search/user/(?P<searched_creator>\d*)/$', 'projects_by_creator', name='projects-by-creator'),
url(r'^search/status/(?P<searched_status>.*)/$', 'projects_by_status', name='projects-by-status'),
url(r'^search/name/(?P<searched_name>.*)/$', 'projects_by_name', name='projects-by-name'),
url(r'^search/(?P<searched_name>\d*)/(?P<searched_status>.*)/(?P<searched_creator>.*)/$', 'projects_complex_search', name='projects-complex-search'),
)
| from django.conf.urls import patterns, url
urlpatterns = patterns('projects.views',
url(r'^add/$', 'add_project', name='add-project'),
url(r'^edit/(?P<project_id>\d+)/$', 'edit_project', name='edit-project'),
url(r'^edit_status/(?P<project_id>\d+)/$', 'edit_status', name='edit-status'),
url(r'^archive/$', 'projects_archive', name='projects-archive'),
url(r'^archive/review/(?P<project_id>\d+)/$', 'show_project', name='show-project'),
url(r'^archive/review/versions/(?P<project_id>\d+)/$', 'show_project_versions', name='show-project-versions'),
url(r'^archive/(?P<year>\d{4})/(?P<month>\d{,2})/$', 'projects_year_month', name='projects-year-month'),
)
| mit | Python |
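How the named groups in the new search routes capture values, checked with the re module directly (Django performs the equivalent match when dispatching):
import re
pattern = r'^search/status/(?P<searched_status>.*)/$'
match = re.match(pattern, 'search/status/approved/')
assert match.groupdict() == {'searched_status': 'approved'}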
35b0af0fafb117e3cc613d3073602902fadb9c5c | Add daily-view to worker | SEC-i/ecoControl,SEC-i/ecoControl,SEC-i/ecoControl | server/worker/functions.py | server/worker/functions.py | import logging
import time
from django.db import connection
from server.models import SensorValue, Threshold, Notification
import functions
logger = logging.getLogger('worker')
def check_thresholds():
for threshold in Threshold.objects.all():
try:
latest_sensorvalue = SensorValue.objects.filter(
sensor=threshold.sensor).latest('timestamp')
if threshold.min_value is not None:
if latest_sensorvalue.value < threshold.min_value:
message = 'Threshold "%s" triggered (%s < %s)' % (
threshold.name, latest_sensorvalue.value, threshold.min_value)
Notification(threshold=threshold, message=message,
category=Notification.Danger, show_manager=threshold.show_manager).save()
logger.debug(message)
if threshold.max_value is not None:
if latest_sensorvalue.value > threshold.max_value:
message = 'Threshold "%s" triggered (%s > %s)' % (
threshold.name, latest_sensorvalue.value, threshold.max_value)
Notification(threshold=threshold, message=message,
category=Notification.Danger, show_manager=threshold.show_manager).save()
logger.debug(message)
except SensorValue.DoesNotExist:
logger.debug('No SensorValue found for Sensor #%s' %
threshold.sensor_id)
def refresh_views():
logger.debug('Trigger views refresh')
cursor = connection.cursor()
cursor.execute('''REFRESH MATERIALIZED VIEW server_sensorvaluehourly;''')
cursor.execute('''REFRESH MATERIALIZED VIEW server_sensorvaluedaily;''')
cursor.execute(
'''REFRESH MATERIALIZED VIEW server_sensorvaluemonthlysum;''')
cursor.execute(
'''REFRESH MATERIALIZED VIEW server_sensorvaluemonthlyavg;''')
logger.debug('Successfully refreshed views') | import logging
import time
from django.db import connection
from server.models import SensorValue, Threshold, Notification
import functions
logger = logging.getLogger('worker')
def check_thresholds():
for threshold in Threshold.objects.all():
try:
latest_sensorvalue = SensorValue.objects.filter(
sensor=threshold.sensor).latest('timestamp')
if threshold.min_value is not None:
if latest_sensorvalue.value < threshold.min_value:
message = 'Threshold "%s" triggered (%s < %s)' % (
threshold.name, latest_sensorvalue.value, threshold.min_value)
Notification(threshold=threshold, message=message,
category=Notification.Danger, show_manager=threshold.show_manager).save()
logger.debug(message)
if threshold.max_value is not None:
if latest_sensorvalue.value > threshold.max_value:
message = 'Threshold "%s" triggered (%s > %s)' % (
threshold.name, latest_sensorvalue.value, threshold.max_value)
Notification(threshold=threshold, message=message,
category=Notification.Danger, show_manager=threshold.show_manager).save()
logger.debug(message)
except SensorValue.DoesNotExist:
logger.debug('No SensorValue found for Sensor #%s' %
threshold.sensor_id)
def refresh_views():
logger.debug('Trigger views refresh')
cursor = connection.cursor()
cursor.execute('''REFRESH MATERIALIZED VIEW server_sensorvaluehourly;''')
cursor.execute(
'''REFRESH MATERIALIZED VIEW server_sensorvaluemonthlysum;''')
cursor.execute(
'''REFRESH MATERIALIZED VIEW server_sensorvaluemonthlyavg;''')
logger.debug('Successfully refreshed views')
| mit | Python |
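The four REFRESH statements could equally be table-driven; a sketch of that variant, with the view names copied from the code above and the cursor passed in:
VIEWS = (
    'server_sensorvaluehourly',
    'server_sensorvaluedaily',
    'server_sensorvaluemonthlysum',
    'server_sensorvaluemonthlyavg',
)
def refresh_views_sketch(cursor):
    for view in VIEWS:  # constant identifiers, so plain interpolation is safe
        cursor.execute('REFRESH MATERIALIZED VIEW %s;' % view)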
13bb0a7ea546fed050b68c73730384c168370ac3 | Add typing for plogger. | KarlGong/ptest,KarlGong/ptest | ptest/plogger.py | ptest/plogger.py | import logging
import sys
from datetime import datetime
from . import config
class PConsole:
def __init__(self, out):
self.out = out
def write(self, msg: str):
self.out.write(str(msg))
def write_line(self, msg: str):
self.out.write(str(msg) + "\n")
pconsole = PConsole(sys.stdout)
pconsole_err = PConsole(sys.stderr)
class PReporter:
def __init__(self):
pass
def debug(self, msg: str, screenshot: bool = False):
self.__log(logging.DEBUG, msg, screenshot)
def info(self, msg: str, screenshot: bool = False):
self.__log(logging.INFO, msg, screenshot)
def warn(self, msg: str, screenshot: bool = False):
self.__log(logging.WARN, msg, screenshot)
def error(self, msg: str, screenshot: bool = False):
self.__log(logging.ERROR, msg, screenshot)
def critical(self, msg: str, screenshot: bool = False):
self.__log(logging.CRITICAL, msg, screenshot)
def __log(self, level: int, msg: str, screenshot: bool):
from . import test_executor, screen_capturer
try:
running_test_fixture = test_executor.current_executor().get_property("running_test_fixture")
except AttributeError as e:
pconsole.write_line("[%s] %s" % (logging.getLevelName(level), msg))
else:
log = {"time": str(datetime.now()), "level": logging.getLevelName(level).lower(), "message": str(msg)}
if screenshot and not config.get_option("disable_screenshot"):
log["screenshots"] = screen_capturer.take_screenshots()
running_test_fixture.logs.append(log)
if config.get_option("verbose"):
# output to pconsole
message = "[%s] %s" % (running_test_fixture.full_name, msg)
pconsole.write_line(message)
preporter = PReporter()
| import logging
import sys
from datetime import datetime
from . import config
class PConsole:
def __init__(self, out):
self.out = out
def write(self, msg):
self.out.write(str(msg))
def write_line(self, msg):
self.out.write(str(msg) + "\n")
pconsole = PConsole(sys.stdout)
pconsole_err = PConsole(sys.stderr)
class PReporter:
def __init__(self):
pass
def debug(self, msg, screenshot=False):
self.__log(logging.DEBUG, msg, screenshot)
def info(self, msg, screenshot=False):
self.__log(logging.INFO, msg, screenshot)
def warn(self, msg, screenshot=False):
self.__log(logging.WARN, msg, screenshot)
def error(self, msg, screenshot=False):
self.__log(logging.ERROR, msg, screenshot)
def critical(self, msg, screenshot=False):
self.__log(logging.CRITICAL, msg, screenshot)
def __log(self, level, msg, screenshot):
from . import test_executor, screen_capturer
try:
running_test_fixture = test_executor.current_executor().get_property("running_test_fixture")
except AttributeError as e:
pconsole.write_line("[%s] %s" % (logging.getLevelName(level), msg))
else:
log = {"time": str(datetime.now()), "level": logging.getLevelName(level).lower(), "message": str(msg)}
if screenshot and not config.get_option("disable_screenshot"):
log["screenshots"] = screen_capturer.take_screenshots()
running_test_fixture.logs.append(log)
if config.get_option("verbose"):
# output to pconsole
message = "[%s] %s" % (running_test_fixture.full_name, msg)
pconsole.write_line(message)
preporter = PReporter()
| apache-2.0 | Python |
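The annotations added above are visible at runtime; a quick check with typing.get_type_hints on a similarly annotated function:
from typing import get_type_hints
def log(msg: str, screenshot: bool = False) -> None:
    pass
assert get_type_hints(log) == {'msg': str, 'screenshot': bool, 'return': type(None)}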
3b5b3afbc66f60df45f0458ffdd0d37b9a7c50d0 | Add homemade fast width/height reader for JPEG files | vperron/picasa-toolbox | ptoolbox/tags.py | ptoolbox/tags.py | # -*- coding: utf-8 -*-
import struct
from datetime import datetime
TAG_WIDTH = 'EXIF ExifImageWidth'
TAG_HEIGHT = 'EXIF ExifImageLength'
TAG_DATETIME = 'Image DateTime'
def jpeg_size(path):
"""Get image size.
Structure of JPEG file is:
ffd8 [ffXX SSSS DD DD ...] [ffYY SSSS DDDD ...] (S is 16bit size, D the data)
We look for the SOF0 header 0xffc0; its structure is
[ffc0 SSSS PPHH HHWW ...] where PP is 8bit precision, HHHH 16bit height, WWWW width
"""
with open(path, 'rb') as f:
_, header_type, size = struct.unpack('>HHH', f.read(6))
while header_type != 0xffc0:
f.seek(size - 2, 1)
header_type, size = struct.unpack('>HH', f.read(4))
bpi, height, width = struct.unpack('>BHH', f.read(5))
return width, height
def parse_time(tags):
tag = tags.get(TAG_DATETIME, None)
if not tag:
raise KeyError(TAG_DATETIME)
return datetime.strptime(str(tag), "%Y:%m:%d %H:%M:%S")
def parse_width(tags):
tag = tags.get(TAG_WIDTH, None)
if not tag:
raise KeyError(TAG_WIDTH)
return int(str(tag), 10)
def parse_height(tags):
tag = tags.get(TAG_HEIGHT, None)
if not tag:
raise KeyError(TAG_HEIGHT)
return int(str(tag), 10)
| # -*- coding: utf-8 -*-
from datetime import datetime
TAG_WIDTH = 'EXIF ExifImageWidth'
TAG_HEIGHT = 'EXIF ExifImageLength'
TAG_DATETIME = 'Image DateTime'
def parse_time(tags):
tag = tags.get(TAG_DATETIME, None)
if not tag:
raise KeyError(TAG_DATETIME)
return datetime.strptime(str(tag), "%Y:%m:%d %H:%M:%S")
def parse_width(tags):
tag = tags.get(TAG_WIDTH, None)
if not tag:
raise KeyError(TAG_WIDTH)
return int(str(tag), 10)
def parse_height(tags):
tag = tags.get(TAG_HEIGHT, None)
if not tag:
raise KeyError(TAG_HEIGHT)
return int(str(tag), 10)
| mit | Python |
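The SOF0 arithmetic from jpeg_size, exercised on a hand-built byte string (SOI + SOF0 with precision 8, height 240, width 320):
import struct
data = b'\xff\xd8' + b'\xff\xc0' + struct.pack('>H', 0x11) + struct.pack('>BHH', 8, 240, 320)
soi, header_type, size = struct.unpack('>HHH', data[:6])
assert (soi, header_type) == (0xffd8, 0xffc0)
precision, height, width = struct.unpack('>BHH', data[6:11])
assert (width, height) == (320, 240)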
7a64ac255f53e85f888093daac83b3c0fabcf15e | Update ESPEC_tests.py | jmbattle/pyESPEC | ESPEC_tests.py | ESPEC_tests.py | # -*- coding: utf-8 -*-
"""ESPEC_tests.py: Simple test routine for pyESPEC library
__author__ = "Jason M. Battle"
__copyright__ = "Copyright 2016, Jason M. Battle"
__license__ = "MIT"
__email__ = "[email protected]"
"""
from ESPEC import SH241
if __name__ == '__main__':
test = SH241()
test.OpenChannel()
if test.GetMode() == 'OFF':
test.SetPowerOn()
# Read Commands
test.GetROMVersion()
test.GetIntStatus()
test.GetIntMask()
test.GetAlarmStat()
test.GetKeyProtStat()
test.GetType()
test.GetMode()
test.GetCondition()
test.GetTemp()
test.GetHumid()
test.GetRefrigeCtl()
test.GetRelayStat()
test.GetHeaterStat()
test.GetProgStat()
test.GetProgData()
test.GetProgStepData(1)
# Write Commands
test.SetIntMask(0b01000000)
test.ResetIntStatus()
test.SetKeyProtectOn()
test.SetKeyProtectOff()
test.SetPowerOff()
test.SetPowerOn()
test.SetTemp(25.0)
test.SetHighTemp(155.0)
test.SetLowTemp(-45.0)
test.SetHumid(50.0)
test.SetHighHumid(100)
test.SetLowHumid(0)
test.SetHumidOff()
test.SetRefrigeCtl(9)
test.SetRelayOn(1)
test.SetRelayOff(1)
test.SetModeOff()
test.SetModeStandby()
test.SetModeConstant()
test.ProgramWrite()
test.SetModeProgram()
test.ProgramAdvance()
test.ProgramEnd()
test.SetModeStandby()
test.ProgramErase()
test.SetModeOff()
test.CloseChannel()
| # -*- coding: utf-8 -*-
"""ESPEC_tests.py: Simple test routine for pyESPEC library
__author__ = "Jason M. Battle"
__copyright__ = "Copyright 2016, Jason M. Battle"
__license__ = "MIT"
__email__ = "[email protected]"
"""
from ESPEC import SH241
if __name__ == '__main__':
test = SH241()
test.OpenChannel()
if test.GetMode() == 'OFF':
test.SetPowerOn()
# Read Commands
test.GetROMVersion()
test.GetIntStatus()
test.GetIntMask()
test.GetAlarmStat()
test.GetKeyProtStat()
test.GetType()
test.GetMode()
test.GetCondition()
test.GetTemp()
test.GetHumid()
test.GetRefrigeCtl()
test.GetRelayStat()
test.GetHeaterStat()
test.GetProgStat()
test.GetProgData()
test.GetProgStepData(1)
# Write Commands
test.SetIntMask(0b01000000)
test.ResetIntStatus()
test.SetKeyProtectOn()
test.SetKeyProtectOff()
test.SetPowerOff()
test.SetPowerOn()
test.SetTemp(25.0)
test.SetHighTemp(155.0)
test.SetLowTemp(-45.0)
test.SetHumid(50.0)
test.SetHighHumid(100)
test.SetLowHumid(0)
test.SetHumidOff()
test.SetRefrigeCtl(9)
test.SetRelayOn(1)
test.SetRelayOff(1)
test.SetModeOff()
test.SetModeStandby()
test.SetModeConstant()
test.ProgramWrite()
test.SetModeProgram()
test.ProgramAdvance()
test.ProgramEnd()
test.SetModeStandby()
test.ProgramErase()
test.SetModeOff()
test.CloseChannel()
| mit | Python |
72d905e1e4098cf929213f59662c0c3090fd93cf | remove debug print | zbraniecki/pyast | pyast/dump/js.py | pyast/dump/js.py | import json
import pyast
from collections import OrderedDict
import sys
if sys.version >= '3':
basestring = str
else:
pass
def _dump_node_name(node):
return node.__class__.__name__.lower()
def _dump_node(node, name=None, indent=0):
if isinstance(node, basestring):
return node
elif isinstance(node, bool):
return node
struct = OrderedDict({'type': None})
if isinstance(node, pyast.Node):
struct['type'] = _dump_node_name(node)
for field in node._fields:
struct[field] = _dump_node(getattr(node, field))
elif isinstance(node, pyast.TypedList):
struct = []
for elem in node:
struct.append(_dump_node(elem))
elif isinstance(node, pyast.TypedDict):
struct = {}
        for key, elem in node.items():
            struct[key] = _dump_node(elem)
return struct
def dump(ast):
struct = _dump_node(ast)
o = json.dumps(struct, indent=2)
return o
| import json
import pyast
from collections import OrderedDict
import sys
if sys.version >= '3':
basestring = str
else:
pass
def _dump_node_name(node):
return node.__class__.__name__.lower()
def _dump_node(node, name=None, indent=0):
if isinstance(node, basestring):
return node
elif isinstance(node, bool):
return node
struct = OrderedDict({'type': None})
if isinstance(node, pyast.Node):
struct['type'] = _dump_node_name(node)
for field in node._fields:
struct[field] = _dump_node(getattr(node, field))
elif isinstance(node, pyast.TypedList):
struct = []
for elem in node:
struct.append(_dump_node(elem))
elif isinstance(node, pyast.TypedDict):
struct = {}
        for key, elem in node.items():
            struct[key] = _dump_node(elem)
return struct
def dump(ast):
struct = _dump_node(ast)
print(json)
o = json.dumps(struct, indent=2)
return o
| bsd-3-clause | Python |
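The dump logic dispatches on node type and recurses; the same shape in miniature, with a plain class standing in for pyast.Node:
import json
from collections import OrderedDict
class Leaf:
    _fields = ('value',)
    def __init__(self, value):
        self.value = value
def dump_node(node):
    if isinstance(node, str):
        return node
    struct = OrderedDict(type=node.__class__.__name__.lower())
    for field in node._fields:
        struct[field] = dump_node(getattr(node, field))
    return struct
assert json.loads(json.dumps(dump_node(Leaf('x')))) == {'type': 'leaf', 'value': 'x'}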
c46e472755c7b7dd450626e136f31a29ca9a5321 | Fix a regression in accessing the username for the session. | reviewboard/rbtools,halvorlu/rbtools,beol/rbtools,datjwu/rbtools,davidt/rbtools,datjwu/rbtools,davidt/rbtools,reviewboard/rbtools,haosdent/rbtools,halvorlu/rbtools,haosdent/rbtools,haosdent/rbtools,reviewboard/rbtools,beol/rbtools,beol/rbtools,datjwu/rbtools,halvorlu/rbtools,davidt/rbtools | rbtools/utils/users.py | rbtools/utils/users.py | from __future__ import unicode_literals
import getpass
import logging
import sys
from six.moves import input
from rbtools.api.errors import AuthorizationError
from rbtools.commands import CommandError
def get_authenticated_session(api_client, api_root, auth_required=False):
"""Return an authenticated session.
None will be returned if the user is not authenticated, unless the
'auth_required' parameter is True, in which case the user will be prompted
to login.
"""
session = api_root.get_session(expand='user')
if not session.authenticated:
if not auth_required:
return None
logging.warning('You are not authenticated with the Review Board '
'server at %s, please login.' % api_client.url)
sys.stderr.write('Username: ')
username = input()
password = getpass.getpass(b'Password: ')
api_client.login(username, password)
try:
session = session.get_self()
except AuthorizationError:
raise CommandError('You are not authenticated.')
return session
def get_user(api_client, api_root, auth_required=False):
"""Return the user resource for the current session."""
session = get_authenticated_session(api_client, api_root, auth_required)
if session:
return session.user
return None
def get_username(api_client, api_root, auth_required=False):
"""Return the username for the current session."""
user = get_user(api_client, api_root, auth_required)
if user:
return user.username
return None
| from __future__ import unicode_literals
import getpass
import logging
import sys
from six.moves import input
from rbtools.api.errors import AuthorizationError
from rbtools.commands import CommandError
def get_authenticated_session(api_client, api_root, auth_required=False):
"""Return an authenticated session.
None will be returned if the user is not authenticated, unless the
'auth_required' parameter is True, in which case the user will be prompted
to login.
"""
session = api_root.get_session(expand='user')
if not session.authenticated:
if not auth_required:
return None
logging.warning('You are not authenticated with the Review Board '
'server at %s, please login.' % api_client.url)
sys.stderr.write('Username: ')
username = input()
password = getpass.getpass(b'Password: ')
api_client.login(username, password)
try:
session = session.get_self()
except AuthorizationError:
raise CommandError('You are not authenticated.')
return session
def get_user(api_client, api_root, auth_required=False):
"""Return the user resource for the current session."""
session = get_authenticated_session(api_client, api_root, auth_required)
if session:
return session.user
def get_username(api_client, api_root, auth_required=False):
"""Return the username for the current session."""
session = get_authenticated_session(api_client, api_root, auth_required)
if session:
return session.links.user.title
| mit | Python |
2beb589edc2f7b57be0d6a559e2f29471490bc91 | FIX py2 support! | hayd/pyfaker | pyfaker/utils.py | pyfaker/utils.py | import re
import random
import os
import json
from string import Formatter
class BaseFake(object):
pass
class CallFormatter(Formatter):
def get_field(self, field_name, *args, **kwargs):
obj, used_key = Formatter.get_field(self, field_name, *args, **kwargs)
return obj(), used_key
'''
class CallFormatter(Formatter):
def get_field(field_name, *args, **kwargs):
used_key = Formatter.get_field(field_name, *args, **kwargs)
return (used_key[0](),) + used_key[1:]
class CallFormatter(Formatter):
def get_field(self, field_name, *args, **kwargs):
if kwargs is None:
kwargs = kwargs.update(args[1])
else:
kwargs.update(args[1])
obj, used_key = Formatter.get_field(self, field_name, args[0:1], kwargs)
return obj(kwargs['cls']()), used_key
'''
call_fmt = CallFormatter()
def get_locales():
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
fpath = os.path.join(curpath(), 'locales.json')
with open(fpath, 'r') as f:
return json.load(f)
_all_locales = get_locales()
def to_camel(s):
"""returns string to camel caps
Example
to_camel('foo_bar') == 'FooBar'
"""
try:
        return str(s.title().replace('_', '')) # assume the titles are ASCII, otherwise the class name is invalid
except Exception: # TODO specify which kind of error
raise ValueError(
"%s doesn't convert to a good string for a class name" % s)
def update_loc(loc1, loc2):
loc1.update(loc2)
'''
def format_(s, current, fake_=None):
namespace = dict(current.__dict__, **{'cls': current}) # and fake_ ?
# first replace #s with digits then fill in rest using _locals
def callback(matchobj):
return '%s' % random.randrange(10)
s = re.sub(r'#', callback, s)
return s
fmt = CallFormatter()
return fmt.format(s, **namespace)
'''
| import re
import random
import os
import json
from string import Formatter
class BaseFake(object):
pass
class CallFormatter(Formatter):
def get_field(self, field_name, *args, **kwargs):
obj, used_key = Formatter.get_field(self, field_name, *args, **kwargs)
return obj(), used_key
'''
class CallFormatter(Formatter):
def get_field(field_name, *args, **kwargs):
used_key = Formatter.get_field(field_name, *args, **kwargs)
return (used_key[0](),) + used_key[1:]
class CallFormatter(Formatter):
def get_field(self, field_name, *args, **kwargs):
if kwargs is None:
kwargs = kwargs.update(args[1])
else:
kwargs.update(args[1])
obj, used_key = Formatter.get_field(self, field_name, args[0:1], kwargs)
return obj(kwargs['cls']()), used_key
'''
call_fmt = CallFormatter()
def get_locales():
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
fpath = os.path.join(curpath(), 'locales.json')
with open(fpath, 'r') as f:
return json.load(f)
_all_locales = get_locales()
def to_camel(s):
"""returns string to camel caps
Example
to_camel('foo_bar') == 'FooBar'
"""
try:
        return s.title().replace('_', '') # assume the titles are ASCII, otherwise the class name is invalid
except Exception: # TODO specify which kind of error
raise ValueError(
"%s doesn't convert to a good string for a class name" % s)
def update_loc(loc1, loc2):
loc1.update(loc2)
'''
def format_(s, current, fake_=None):
namespace = dict(current.__dict__, **{'cls': current}) # and fake_ ?
# first replace #s with digits then fill in rest using _locals
def callback(matchobj):
return '%s' % random.randrange(10)
s = re.sub(r'#', callback, s)
return s
fmt = CallFormatter()
return fmt.format(s, **namespace)
'''
| mit | Python |
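What the kept CallFormatter does: every replacement field is looked up as usual, then invoked as a zero-argument callable during formatting:
from string import Formatter
class CallFormatterDemo(Formatter):
    def get_field(self, field_name, *args, **kwargs):
        obj, used_key = Formatter.get_field(self, field_name, *args, **kwargs)
        return obj(), used_key
fmt = CallFormatterDemo()
assert fmt.format('{a} {b}', a=lambda: 'hello', b=lambda: 'world') == 'hello world'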
cf644a17bd8c2abe436a37159bdf3eec7d2a358d | Remove premature optimization | RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline | luigi/tasks/quickgo/load_annotations.py | luigi/tasks/quickgo/load_annotations.py | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tasks.utils.pgloader import PGLoader
from tasks.ontologies import Ontologies
from .quickgo_data import QuickGoData
CONTROL_FILE = """
LOAD CSV
FROM '{filename}'
WITH ENCODING ISO-8859-14
HAVING FIELDS ({fields})
INTO {db_url}
TARGET COLUMNS ({columns})
SET
search_path = '{search_path}'
WITH
fields escaped by double-quote,
fields terminated by ','
BEFORE LOAD DO
$$
create table if not exists {load_table} (
rna_id varchar(50),
qualifier varchar(30),
assigned_by varchar(50),
extensions jsonb,
ontology_term_id varchar(15),
evidence_code varchar(15)
);
$$,
$$
truncate table {load_table};
$$
AFTER LOAD DO
$$
INSERT INTO {final_table} (
rna_id,
qualifier,
assigned_by,
extensions,
ontology_term_id,
evidence_code
) (
SELECT
rna_id,
qualifier,
assigned_by,
extensions,
ontology_term_id,
evidence_code
FROM {load_table}
)
ON CONFLICT (rna_id, qualifier, assigned_by, ontology_term_id, evidence_code)
DO UPDATE
SET
rna_id = excluded.rna_id,
qualifier = excluded.qualifier,
assigned_by = excluded.assigned_by,
extensions = excluded.extensions,
ontology_term_id = excluded.ontology_term_id,
evidence_code = excluded.evidence_code
;
$$,
$$
DROP TABLE {load_table};
$$
;
"""
class QuickGoLoadAnnotations(PGLoader):
def requires(self):
return [
QuickGoData(),
Ontologies(),
]
def control_file(self):
output = self.requires()[0].output()
table = 'go_term_annotations'
load_table = 'load_' + table
fields = ', '.join(output.annotations.headers)
return CONTROL_FILE.format(
filename=output.annotations.filename,
fields=fields,
columns=fields,
final_table=table,
load_table=load_table,
db_url=self.db_url(table=load_table),
search_path=self.db_search_path(),
)
| # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tasks.utils.pgloader import PGLoader
from tasks.ontologies import Ontologies
from .quickgo_data import QuickGoData
CONTROL_FILE = """
LOAD CSV
FROM '{filename}'
WITH ENCODING ISO-8859-14
HAVING FIELDS ({fields})
INTO {db_url}
TARGET COLUMNS ({columns})
SET
search_path = '{search_path}'
WITH
fields escaped by double-quote,
fields terminated by ','
BEFORE LOAD DO
$$
create table if not exists {load_table} (
rna_id varchar(50),
qualifier varchar(30),
assigned_by varchar(50),
extensions jsonb,
ontology_term_id varchar(15),
evidence_code varchar(15)
);
$$,
$$
truncate table {load_table};
$$
AFTER LOAD DO
$$
INSERT INTO {final_table} (
rna_id,
qualifier,
assigned_by,
extensions,
ontology_term_id,
evidence_code
) (
SELECT
rna_id,
qualifier,
assigned_by,
extensions,
ontology_term_id,
evidence_code
FROM {load_table}
)
ON CONFLICT (rna_id, qualifier, assigned_by, ontology_term_id, evidence_code)
DO UPDATE
SET
extensions = excluded.extensions
;
$$,
$$
DROP TABLE {load_table};
$$
;
"""
class QuickGoLoadAnnotations(PGLoader):
def requires(self):
return [
QuickGoData(),
Ontologies(),
]
def control_file(self):
output = self.requires()[0].output()
table = 'go_term_annotations'
load_table = 'load_' + table
fields = ', '.join(output.annotations.headers)
return CONTROL_FILE.format(
filename=output.annotations.filename,
fields=fields,
columns=fields,
final_table=table,
load_table=load_table,
db_url=self.db_url(table=load_table),
search_path=self.db_search_path(),
)
| apache-2.0 | Python |
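control_file() is plain str.format over the template; filled with dummy values it looks like this (the file name and connection string are placeholders, not a real deployment):
template = "LOAD CSV FROM '{filename}' INTO {db_url} ({fields})"
rendered = template.format(
    filename='annotations.csv',
    db_url='pgsql://host/db?load_go_term_annotations',
    fields='rna_id, qualifier',
)
assert "FROM 'annotations.csv'" in rendered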
23ab13f192b58f8b550aa2e16d5861e14535698a | Add slot fot empty_patch in cli pop command | vadmium/python-quilt,bjoernricks/python-quilt | quilt/cli/pop.py | quilt/cli/pop.py | # vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.cli.meta import Command
from quilt.pop import Pop
class PopCommand(Command):
usage = "%prog pop [-a] [patch]"
name = "pop"
def add_args(self, parser):
parser.add_option("-a", "--all", help="remove all applied patches",
action="store_true")
def run(self, options, args):
pop = Pop(os.getcwd(), self.get_pc_dir())
pop.unapplying.connect(self.unapplying)
pop.unapplied.connect(self.unapplied)
pop.empty_patch.connect(self.empty_patch)
if options.all:
pop.unapply_all()
elif not args:
pop.unapply_top_patch()
else:
pop.unapply_patch(args[0])
def unapplying(self, patch):
print "Removing patch %s" % patch.get_name()
def unapplied(self, patch):
if not patch:
print "No patches applied"
else:
print "Now at patch %s" % patch.get_name()
def empty_patch(self, patch):
print "Patch %s appears to be empty, removing" % patch.get_name()
| # vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.cli.meta import Command
from quilt.pop import Pop
class PopCommand(Command):
usage = "%prog pop [-a] [patch]"
name = "pop"
def add_args(self, parser):
parser.add_option("-a", "--all", help="remove all applied patches",
action="store_true")
def run(self, options, args):
pop = Pop(os.getcwd(), self.get_pc_dir())
pop.unapplying.connect(self.unapplying)
pop.unapplied.connect(self.unapplied)
if options.all:
pop.unapply_all()
elif not args:
pop.unapply_top_patch()
else:
pop.unapply_patch(args[0])
def unapplying(self, patch):
print "Removing patch %s" % patch.get_name()
def unapplied(self, patch):
if not patch:
print "No patches applied"
else:
print "Now at patch %s" % patch.get_name()
| mit | Python |
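The new empty_patch wiring uses the same connect-a-callback pattern as unapplying/unapplied; a toy signal class makes the flow explicit (not quilt's real signal implementation):
class Signal:
    def __init__(self):
        self._slots = []
    def connect(self, slot):
        self._slots.append(slot)
    def emit(self, *args):
        for slot in self._slots:
            slot(*args)
fired = []
empty_patch = Signal()
empty_patch.connect(lambda patch: fired.append(patch))
empty_patch.emit('my-patch')
assert fired == ['my-patch']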
2c386cc3e81caffd906b68a6d527bd8bdd1d5ae5 | Replace nltk.model.NgramModel with nltk.ngrams | qe-team/marmot | marmot/features/lm_feature_extractor.py | marmot/features/lm_feature_extractor.py | from nltk import ngrams, word_tokenize
#from nltk.model import NgramModel
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.simple_corpus import SimpleCorpus
# Class that extracts various LM features
# Calling an external LM is very slow, so a new lm is constructed with nltk
class LMFeatureExtractor(FeatureExtractor):
def __init__(self, corpus_file, order=3):
self.order = order
self.lm = [ set() for i in range(order) ]
for line in open(corpus_file):
words = word_tokenize(line[:-1].decode('utf-8'))
for i in range(1,order):
self.lm[i] = self.lm[i].union( ngrams( words, i+1 ) )
self.lm[0] = self.lm[0].union(words)
def check_lm(self, ngram, side='left'):
for i in range(self.order, 0, -1):
if side == 'left':
cur_ngram = ngram[len(ngram)-i:]
elif side == 'right':
cur_ngram = ngram[:i]
if tuple(cur_ngram) in self.lm[i-1]:
return i
return 0
# returns a set of features related to LM
# currently extracting: highest order ngram including the word and its LEFT context,
# highest order ngram including the word and its RIGHT context
def get_features(self, context_obj):
left_ngram = self.check_lm( context_obj['target'][:context_obj['index']+1], side='left' )
right_ngram = self.check_lm( context_obj['target'][context_obj['index']:], side='right' )
return (left_ngram, right_ngram)
| from nltk.model import NgramModel
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.simple_corpus import SimpleCorpus
def check_lm_recursive(words, lm, low_order='left'):
if len(words) < lm._n:
return check_lm_recursive(words, lm._backoff, low_order=low_order)
if tuple(words) in lm._ngrams:
return lm._n
elif lm._n > 1:
if low_order == 'left':
return check_lm_recursive(words[1:], lm._backoff, low_order=low_order)
elif low_order == 'right':
return check_lm_recursive(words[:-1], lm._backoff, low_order=low_order)
else:
return 0
# Class that extracts various LM features
# Calling an external LM is very slow, so a new lm is constructed with nltk
class LMFeatureExtractor(FeatureExtractor):
def __init__(self, corpus_file, order=3):
# load the corpus
corp = SimpleCorpus(corpus_file)
# nltk LM requires all words in one list
all_words = [w for sent in [line for line in corp.get_texts()] for w in sent]
self.lm = NgramModel(order, all_words)
def check_lm_recursive(words, lm, low_order='left'):
if len(words) < lm._n:
return check_lm_recursive(words, lm._backoff, low_order=low_order)
if tuple(words) in lm._ngrams:
return lm._n
elif lm._n > 1:
if low_order == 'left':
return check_lm_recursive(words[1:], lm._backoff, low_order=low_order)
elif low_order == 'right':
return check_lm_recursive(words[:-1], lm._backoff, low_order=low_order)
else:
return 0
# returns a set of features related to LM
# currently extracting: highest order ngram including the word and its LEFT context,
# highest order ngram including the word and its RIGHT context
def get_features(self, context_obj):
left_ngram = check_lm_recursive(context_obj['target'][max(0, context_obj['index']-self.lm._n):context_obj['index']], self.lm, low_order='left')
right_ngram = check_lm_recursive(context_obj['target'][context_obj['index']:min(len(context_obj['target']),context_obj['index']+self.lm._n)], self.lm, low_order='right')
return (left_ngram, right_ngram)
| isc | Python |
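The new set-based model in miniature: zip() builds the same n-gram tuples that nltk.ngrams yields, and lookup is plain set membership (toy corpus invented):
def ngram_sets(words, order=3):
    sets = [set(words)]  # order 1: the vocabulary itself
    for n in range(2, order + 1):
        sets.append(set(zip(*[words[i:] for i in range(n)])))
    return sets
lm = ngram_sets('the cat sat on the mat'.split())
assert ('the', 'cat') in lm[1]
assert ('cat', 'sat', 'on') in lm[2]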
5d393ff5c007bafb731aaf703a5225081b99f69a | Align the add/remove URL with the filter URL | eallrich/checkniner,eallrich/checkniner,eallrich/checkniner | cotracker/cotracker/urls.py | cotracker/cotracker/urls.py | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from checkouts.views import (
PilotList,
PilotDetail,
AirstripList,
AirstripDetail,
BaseList,
BaseAttachedDetail,
BaseUnattachedDetail,
FilterFormView,
CheckoutUpdateFormView,
)
admin.autodiscover()
urlpatterns = patterns('',
url(r'^emerald/', include(admin.site.urls)),
)
urlpatterns += patterns('',
url(
regex=r'^pilots/$',
view=PilotList.as_view(),
name='pilot_list',
),
url(
regex=r'^pilots/(?P<username>\w+)/$',
view=PilotDetail.as_view(),
name='pilot_detail',
),
url(
regex=r'^airstrips/$',
view=AirstripList.as_view(),
name='airstrip_list',
),
url(
regex=r'^airstrips/(?P<ident>\w+)/$',
view=AirstripDetail.as_view(),
name='airstrip_detail',
),
url(
regex=r'^bases/$',
view=BaseList.as_view(),
name='base_list',
),
url(
regex=r'^bases/(?P<ident>\w+)/attached/$',
view=BaseAttachedDetail.as_view(),
name='base_attached_detail',
),
url(
regex=r'^bases/(?P<ident>\w+)/unattached/$',
view=BaseUnattachedDetail.as_view(),
name='base_unattached_detail',
),
url(
regex=r'^checkouts/$',
view=FilterFormView.as_view(),
name='checkout_filter',
),
url(
regex=r'^checkouts/edit/$',
view=CheckoutUpdateFormView.as_view(),
name='checkout_update',
),
)
if settings.SERVE_STATIC:
urlpatterns += patterns('',
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT,})
)
| from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from checkouts.views import (
PilotList,
PilotDetail,
AirstripList,
AirstripDetail,
BaseList,
BaseAttachedDetail,
BaseUnattachedDetail,
FilterFormView,
CheckoutUpdateFormView,
)
admin.autodiscover()
urlpatterns = patterns('',
url(r'^emerald/', include(admin.site.urls)),
)
urlpatterns += patterns('',
url(
regex=r'^pilots/$',
view=PilotList.as_view(),
name='pilot_list',
),
url(
regex=r'^pilots/(?P<username>\w+)/$',
view=PilotDetail.as_view(),
name='pilot_detail',
),
url(
regex=r'^airstrips/$',
view=AirstripList.as_view(),
name='airstrip_list',
),
url(
regex=r'^airstrips/(?P<ident>\w+)/$',
view=AirstripDetail.as_view(),
name='airstrip_detail',
),
url(
regex=r'^bases/$',
view=BaseList.as_view(),
name='base_list',
),
url(
regex=r'^bases/(?P<ident>\w+)/attached/$',
view=BaseAttachedDetail.as_view(),
name='base_attached_detail',
),
url(
regex=r'^bases/(?P<ident>\w+)/unattached/$',
view=BaseUnattachedDetail.as_view(),
name='base_unattached_detail',
),
url(
regex=r'^checkouts/$',
view=FilterFormView.as_view(),
name='checkout_filter',
),
url(
regex=r'^update/$',
view=CheckoutUpdateFormView.as_view(),
name='checkout_update',
),
)
if settings.SERVE_STATIC:
urlpatterns += patterns('',
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT,})
)
| mit | Python |
3c9d45ad67b1a1c274cc5ee7a78d174595445733 | Update websocket | EarEEG/eareeg-backend | websocket_data_collector.py | websocket_data_collector.py | #!venv/bin/python
'''
websocket_data_collector.py
This script collects data from the NeuroPy module and writes it to a local run file.
'''
import NeuroPy.NeuroPy as NP
import socketIO_client
import json
import click
from threading import Lock
CLIENT_ID = "CLIENT1"
# declare this globally
socketIO = None
lock = None
def on_connect():
print("connected")
def on_disconnect():
print("disconnected")
def on_callback_response(*args):
print("On callback response: ", args)
# generic callback function for neuropy
# which appends each collected reading to the output file
def generic_callback(variable_name, variable_val):
    # skip raw samples and write one "name value" line per reading
if variable_name == "rawValue":
return
global filename
print("writing")
filename.write("{} {}\n".format(variable_name, variable_val))
def start_data_collection(serial_port, num_seconds=-1):
headset_obj = NP.NeuroPy(serial_port, 9600, log=False)
headset_obj.setCallBack("attention", generic_callback)
headset_obj.setCallBack("meditation", generic_callback)
headset_obj.setCallBack("rawValue", generic_callback)
headset_obj.setCallBack("delta", generic_callback)
headset_obj.setCallBack("theta", generic_callback)
headset_obj.setCallBack("lowAlpha", generic_callback)
headset_obj.setCallBack("highAlpha", generic_callback)
headset_obj.setCallBack("lowBeta", generic_callback)
headset_obj.setCallBack("highBeta", generic_callback)
headset_obj.setCallBack("lowGamma", generic_callback)
headset_obj.setCallBack("midGamma", generic_callback)
headset_obj.setCallBack("poorSignal", generic_callback)
headset_obj.setCallBack("blinkStrength", generic_callback)
headset_obj.start()
if num_seconds != -1:
time.sleep(num_seconds)
headset_obj.stop()
@click.command()
@click.argument('runfile')
@click.argument('clientid')
@click.option('--serial_port', default="/dev/tty.MindWaveMobile-SerialPo", help="Serial port of bluetooth headset")
@click.option('--time', default=5, help="Number of seconds to collect data")
def main(runfile, clientid, serial_port, time):
global filename
filename = open("{}:{}".format(runfile,clientid), "w")
start_data_collection(serial_port, time)
if __name__ == "__main__":
main()
| #!venv/bin/python
'''
websocket_data_collector.py
This script uses websockets to transmit data collected by the NeuroPy module to a remote server.
'''
import NeuroPy.NeuroPy as NP
import socketIO_client
import json
import click
from threading import Lock
CLIENT_ID = "CLIENT1"
# declare this globally
socketIO = None
lock = None
def on_connect():
print("connected")
def on_disconnect():
print("disconnected")
def on_callback_response(*args):
print("On callback response: ", args)
# generic callback function for neuropy
# which sends the data collected over socketio
def generic_callback(variable_name, variable_val):
# generate the dictionary to send to the remote server
# as specified in the doc
return_dict = {}
return_dict["client_id"] = CLIENT_ID
# for now, do nothing when setting rawData
if variable_name == "rawData":
return
return_dict["data"] = [{"type": variable_name, "value": variable_val}]
lock.acquire()
socketIO.emit("data", return_dict, on_callback_response)
lock.release()
def start_data_collection(serial_port, num_seconds=-1):
headset_obj = NP.NeuroPy(serial_port, 9600, log=False)
headset_obj.setCallBack("attention", generic_callback)
headset_obj.setCallBack("meditation", generic_callback)
headset_obj.setCallBack("rawValue", generic_callback)
headset_obj.setCallBack("delta", generic_callback)
headset_obj.setCallBack("theta", generic_callback)
headset_obj.setCallBack("lowAlpha", generic_callback)
headset_obj.setCallBack("highAlpha", generic_callback)
headset_obj.setCallBack("lowBeta", generic_callback)
headset_obj.setCallBack("highBeta", generic_callback)
headset_obj.setCallBack("lowGamma", generic_callback)
headset_obj.setCallBack("midGamma", generic_callback)
headset_obj.setCallBack("poorSignal", generic_callback)
headset_obj.setCallBack("blinkStrength", generic_callback)
headset_obj.start()
@click.command()
@click.argument('host')
@click.argument('port')
@click.option('--serial_port', default="/dev/tty.MindWaveMobile-SerialPo", help="Serial port of bluetooth headset")
@click.option('--time', default=-1, help="Number of seconds to collect data")
def main(host, port, serial_port, time):
lock = Lock()
socketIO = socketIO_client.SocketIO(host, port)
print("Got here")
#socketIO.on("connect", on_connect)
#socketIO.on("disconnected", on_disconnect)
#start_data_collection(serial_port, time)
for i in range(10):
socketIO.emit("data", {"test": i})
socketIO.wait(seconds=1)
if __name__ == "__main__":
main()
| mit | Python |
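The long run of setCallBack calls could be table-driven; a sketch of the same registration as a loop (signal names copied from the code, headset_obj is whatever NeuroPy returns):
SIGNALS = (
    "attention", "meditation", "rawValue", "delta", "theta",
    "lowAlpha", "highAlpha", "lowBeta", "highBeta",
    "lowGamma", "midGamma", "poorSignal", "blinkStrength",
)
def register_callbacks(headset_obj, callback):
    for name in SIGNALS:
        headset_obj.setCallBack(name, callback)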
0cc7fbea3952485e8274c8df1b223fc791181035 | Complete migrate from django to toilets script | praekelt/go-imali-yethu-js,praekelt/go-imali-yethu-js,praekelt/go-imali-yethu-js | ona_migration_script/migrate_toilets.py | ona_migration_script/migrate_toilets.py | import argparse
from ona import OnaApiClient
def generate_location(lat, lon):
return ' '.join([str(lat), str(lon)])
CONVERSIONS = {
'code': 'toilet_code', 'section': 'toilet_section',
'cluster': 'toilet_cluster'}
ADDITIONS = {
'toilet_location': (generate_location, ['lat', 'lon'])
}
DEFAULTS = {
'toilet_state': 'no_issue', 'toilet_issue': '', 'toilet_issue_date': ''}
parser = argparse.ArgumentParser(description='Migrate submissions')
parser.add_argument(
'url', type=str,
help='The full URL to get the JSON toilet information from')
parser.add_argument(
'to_id', type=str,
help="The id (number) of the form to migrate submissions to")
parser.add_argument(
'username', type=str, help='The Ona username used to log in')
parser.add_argument(
'password', type=str, help='The Ona password used to log in')
args = parser.parse_args()
client = OnaApiClient(args.username, args.password)
def get_toilet_info_from_django():
url = args.url
headers = {
"Content-type": "application/json; charset=utf-8"
}
r = client.session.request(
'GET', url, headers=headers)
r.raise_for_status()
return r.json()
def get_fields_from_form(formid):
form = client.get_form_information(formid)
fields = []
for child in form.get('children'):
fields.append(child.get('name'))
return fields
toilet_data = get_toilet_info_from_django()
to_fields = get_fields_from_form(args.to_id)
for toilet in toilet_data:
new_toilet = toilet.copy()
# Add fields
for field, (function, arguments) in ADDITIONS.iteritems():
arguments = [toilet[arg] for arg in arguments]
new_toilet[field] = function(*arguments)
# Migrate fields
for field in toilet:
if field in CONVERSIONS:
new_toilet[CONVERSIONS[field]] = toilet[field]
# Remove deleted fields
if field not in to_fields:
del new_toilet[field]
# Add missing fields
for field in to_fields:
if field not in new_toilet:
new_toilet[field] = DEFAULTS.get(field, None)
# Post submission to new form
form_id_string = client.get_form(args.to_id)['id_string']
try:
client.submission({
"id": form_id_string,
"submission": new_toilet,
})
except:
print "Error sending form %s. Submission: " % form_id_string
print new_toilet
| bsd-3-clause | Python |
|
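The migration above in miniature: applying the ADDITIONS, CONVERSIONS, and DEFAULTS rules to one made-up toilet record (all values here are hypothetical):
```python
CONVERSIONS = {"code": "toilet_code", "section": "toilet_section",
               "cluster": "toilet_cluster"}
DEFAULTS = {"toilet_state": "no_issue", "toilet_issue": "",
            "toilet_issue_date": ""}
to_fields = ["toilet_code", "toilet_section", "toilet_cluster",
             "toilet_location", "toilet_state", "toilet_issue",
             "toilet_issue_date"]
toilet = {"code": "T1", "section": "A", "cluster": "C3",
          "lat": -33.9, "lon": 18.4}

new_toilet = dict(toilet)
new_toilet["toilet_location"] = "%s %s" % (toilet["lat"], toilet["lon"])  # ADDITIONS
for old, new in CONVERSIONS.items():          # rename migrated fields
    new_toilet[new] = new_toilet.pop(old)
for field in list(new_toilet):                # drop fields the target form lacks
    if field not in to_fields:
        del new_toilet[field]
for field in to_fields:                       # backfill defaults for new fields
    new_toilet.setdefault(field, DEFAULTS.get(field))
print(new_toilet["toilet_location"])          # -33.9 18.4
```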
a7827ecf5e480c228c881180e63633712e3dbc3c | Modify ARDUINO_SEARCH_PATHS to include default ubuntu package location | wheeler-microfluidics/dmf-control-board-firmware,wheeler-microfluidics/dmf-control-board-firmware,wheeler-microfluidics/dmf-control-board-firmware,wheeler-microfluidics/dmf-control-board-firmware,wheeler-microfluidics/dmf_control_board_plugin | site_scons/find_avrdude.py | site_scons/find_avrdude.py | import sys
import os
from itertools import chain
from path import path
home_dir = path('~').expand()
ARDUINO_SEARCH_PATHS = [home_dir, ]
if os.name == 'nt':
from win32com.shell import shell, shellcon
mydocs = shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL, 0, 0)
AVRDUDE_NAME = 'avrdude.exe'
ARDUINO_SEARCH_PATHS += [path(mydocs),
path('%SYSTEMDRIVE%/').expand(),
path('%PROGRAMFILES%').expand(), ]
else:
AVRDUDE_NAME = 'avrdude'
ARDUINO_SEARCH_PATHS += [path("/usr/share/")]
def get_arduino_paths():
fs = []
for p in ARDUINO_SEARCH_PATHS:
fs += get_avrdude_list(p)
if not fs:
print >> sys.stderr, '''\
ERROR: arduino install directory not found!
Searched:
%s''' % '\n '.join(ARDUINO_SEARCH_PATHS)
sys.exit(1)
fs.sort(key=lambda x: -x.ctime)
avrdude = fs[0]
p = avrdude.parent
while p and p.name != 'hardware':
p = p.parent
if not p:
print >> sys.stderr, '''Arduino install path not found.'''
sys.exit(1)
arduino_path = p.parent
avrdude_conf = list(arduino_path.walkfiles('avrdude.conf'))
if not avrdude_conf:
print >> sys.stderr, '''avrdude configuration (avrdude.conf) path not found.'''
sys.exit(1)
else:
avrdude_conf = avrdude_conf[0]
return arduino_path, avrdude, avrdude_conf
def get_avrdude_list(p):
return list(set(chain(*[d.walkfiles(AVRDUDE_NAME) for d in p.dirs('arduino*')])))
if __name__ == '__main__':
arduino_path, avrdude, avrdude_conf = get_arduino_paths()
print 'found arduino path:', arduino_path
print 'using newest avrdude:', avrdude
print 'using avrdude config:', avrdude_conf
| import sys
import os
from itertools import chain
from path import path
home_dir = path('~').expand()
ARDUINO_SEARCH_PATHS = [home_dir, ]
if os.name == 'nt':
from win32com.shell import shell, shellcon
mydocs = shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL, 0, 0)
AVRDUDE_NAME = 'avrdude.exe'
ARDUINO_SEARCH_PATHS += [path(mydocs),
path('%SYSTEMDRIVE%/').expand(),
path('%PROGRAMFILES%').expand(), ]
else:
AVRDUDE_NAME = 'avrdude'
ARDUINO_SEARCH_PATHS += [home_dir / path('local/opt'), ]
def get_arduino_paths():
fs = []
for p in ARDUINO_SEARCH_PATHS:
fs += get_avrdude_list(p)
if not fs:
print >> sys.stderr, '''\
ERROR: arduino install directory not found!
Searched:
%s''' % '\n '.join(ARDUINO_SEARCH_PATHS)
sys.exit(1)
fs.sort(key=lambda x: -x.ctime)
avrdude = fs[0]
p = avrdude.parent
while p and p.name != 'hardware':
p = p.parent
if not p:
print >> sys.stderr, '''Arduino install path not found.'''
sys.exit(1)
arduino_path = p.parent
avrdude_conf = list(arduino_path.walkfiles('avrdude.conf'))
if not avrdude_conf:
print >> sys.stderr, '''avrdude configuration (avrdude.conf) path not found.'''
sys.exit(1)
else:
avrdude_conf = avrdude_conf[0]
return arduino_path, avrdude, avrdude_conf
def get_avrdude_list(p):
return list(set(chain(*[d.walkfiles(AVRDUDE_NAME) for d in p.dirs('arduino*')])))
if __name__ == '__main__':
arduino_path, avrdude, avrdude_conf = get_arduino_paths()
print 'found arduino path:', arduino_path
print 'using newest avrdude:', avrdude
print 'using avrdude config:', avrdude_conf
| bsd-3-clause | Python |
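The selection rule above ("the newest avrdude under any arduino* directory wins"), re-sketched with only the standard library instead of path.py; the search bases mirror the commit's defaults:
```python
import glob
import os

candidates = []
for base in ("/usr/share", os.path.expanduser("~")):
    for d in glob.glob(os.path.join(base, "arduino*")):
        for root, _dirs, files in os.walk(d):
            if "avrdude" in files:
                candidates.append(os.path.join(root, "avrdude"))

# mirrors fs.sort(key=lambda x: -x.ctime): newest install wins
candidates.sort(key=lambda p: -os.stat(p).st_ctime)
print(candidates[:1])  # the avrdude that would be selected, if any
```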
028abc55a2c0e7bf9d727fa73eafff98e5f917d2 | Add sparkhistogram package | LucaCanali/Miscellaneous | Spark_Notes/Spark_Histograms/python/setup.py | Spark_Notes/Spark_Histograms/python/setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
description = "Sparkhistogram contains helper functions for generating data histograms with the Spark DataFrame API."
long_description = """
Use this package, sparkhistogram, together with PySpark for generating data histograms using the Spark DataFrame API.
Currently, the package contains only two functions covering some of the most common and low-complexity use cases.
Use:
- `from sparkhistogram import computeHistogram` -> computeHistogram is a function to compute the count/frequency histogram of a given DataFrame column
- `from sparkhistogram import computeWeightedHistogram` -> computeWeightedHistogram is a function to compute the weighted histogram of a given DataFrame column
```
def computeHistogram(df: "DataFrame", value_col: str, min: int, max: int, bins: int) -> "DataFrame"
Parameters
----------
df: the dataframe with the data to compute
value_col: column name on which to compute the histogram
min: minimum value in the histogram
max: maximum value in the histogram
bins: number of histogram buckets to compute
Output DataFrame
----------------
bucket: the bucket number, range from 1 to bins (included)
value: midpoint value of the given bucket
count: number of values in the bucket
```
## Run this example in the PySpark shell
Note: requires PySpark version 3.1 or higher.
`bin/pyspark`
```
# import the helper function to generate the histogram using Spark DataFrame operations
from sparkhistogram import computeHistogram
# generate some toy data
num_events = 100
scale = 100
seed = 4242
df = spark.sql(f"select random({seed}) * {scale} as random_value from range({num_events})")
# define the DataFrame transformation to compute the histogram
histogram = computeHistogram(df, "random_value", -20, 90, 11)
# with Spark 3.3.0 and higher you can also use df.transform
# histogram = df.transform(computeHistogram, "random_value", -20, 90, 11)
# fetch and display the (toy) data
histogram.show()
# expected output:
+------+-----+-----+
|bucket|value|count|
+------+-----+-----+
| 1|-15.0| 0|
| 2| -5.0| 0|
| 3| 5.0| 6|
| 4| 15.0| 10|
| 5| 25.0| 15|
| 6| 35.0| 12|
| 7| 45.0| 9|
| 8| 55.0| 7|
| 9| 65.0| 10|
| 10| 75.0| 16|
| 11| 85.0| 7|
+------+-----+-----+
```
More details and notebooks with matplotlib visualization of the histograms at:
https://github.com/LucaCanali/Miscellaneous/blob/master/Spark_Notes/Spark_DataFrame_Histograms.md
"""
setup(name='sparkhistogram',
version='0.1',
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
author='Luca Canali',
author_email='[email protected]',
url='https://github.com/LucaCanali/Miscellaneous/blob/master/Spark_Notes/Spark_DataFrame_Histograms.md',
license='Apache License, Version 2.0',
include_package_data=True,
packages=find_packages(),
zip_safe=False,
python_requires='>=3.6',
install_requires=[],
classifiers=[
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Development Status :: 4 - Beta',
]
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
description = "sparkhistogram contains helper functions for generating data histograms with the Spark DataFrame API and with Spark SQL."
long_description = "sparkhistogram contains helper functions for generating data histograms with the Spark DataFrame API and with Spark SQL."
setup(name='sparkhistogram',
version='0.1',
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
author='Luca Canali',
author_email='[email protected]',
url='https://github.com/LucaCanali/Miscellaneous/blob/master/Spark_Notes/Spark_DataFrame_Histograms.md',
license='Apache License, Version 2.0',
include_package_data=True,
packages=find_packages(),
zip_safe=False,
python_requires='>=3.6',
install_requires=[],
classifiers=[
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Development Status :: 4 - Beta',
]
)
| apache-2.0 | Python |
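The description above names computeWeightedHistogram but only demonstrates computeHistogram; a hedged usage sketch for a PySpark shell follows, assuming (by analogy, not documentation) a signature with one extra weight-column argument:
```python
# assumed signature, by analogy with computeHistogram: an extra weight column
from sparkhistogram import computeWeightedHistogram

df = spark.sql("select random(4242) * 100 as v, rand() as w from range(1000)")
# each row contributes its weight w to the bucket count instead of 1
whist = computeWeightedHistogram(df, "v", "w", 0, 100, 10)
whist.show()
```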
03c237551aa08cb70fd397cc348e75531cdabd0e | fix schemas for password views | SUNET/eduid-webapp,SUNET/eduid-webapp,SUNET/eduid-webapp | src/eduid_webapp/security/schemas.py | src/eduid_webapp/security/schemas.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from marshmallow import fields
from eduid_common.api.schemas.base import FluxStandardAction, EduidSchema
class CredentialSchema(EduidSchema):
credential_type = fields.String(required=True)
created_ts = fields.String(required=True)
success_ts = fields.String(required=True)
class CredentialList(EduidSchema):
credentials = fields.Nested(CredentialSchema, many=True)
csrf_token = fields.String(required=True)
class SecurityResponseSchema(FluxStandardAction):
payload = fields.Nested(CredentialList, only=('credentials', 'csrf_token'))
csrf_token = fields.String(attribute='csrf_token')
class CsrfSchema(EduidSchema):
csrf_token = fields.String(required=True)
class SecurityPasswordSchema(EduidSchema):
password = fields.String(required=True)
new_password = fields.String(required=True)
repeat_password = fields.String(required=True)
csrf_token = fields.String(required=True)
| # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from marshmallow import fields
from eduid_common.api.schemas.base import EduidSchema
class CredentialSchema(EduidSchema):
credential_type = fields.String(required=True)
created_ts = fields.String(required=True)
success_ts = fields.String(required=True)
class CredentialList(EduidSchema):
credentials = fields.Nested(CredentialSchema, many=True)
csrf_token = fields.String(required=True)
class SecurityResponseSchema(EduidSchema):
payload = fields.Nested(CredentialList, only=('credentials', 'csrf_token'))
class CsrfSchema(EduidSchema):
csrf_token = fields.String(required=True)
class SecurityPasswordSchema(EduidSchema):
password = fields.String(required=True)
new_password = fields.String(required=True)
repeat_password = fields.String(required=True)
csrf_token = fields.String(required=True)
| bsd-3-clause | Python |
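Assuming EduidSchema is a marshmallow Schema subclass (which the imports suggest but the diff does not show), the password schema above would be exercised like this; payload values are made up:
```python
schema = SecurityPasswordSchema()
payload = {"password": "old-secret", "new_password": "new-secret",
           "repeat_password": "new-secret", "csrf_token": "abc123"}
print(schema.validate(payload))                     # {} -> payload accepted
print(schema.validate({"password": "old-secret"}))  # reports each missing field
```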
daa92b15852b3572d7ef03392b061184dbbc76c1 | fix to use the right cert in AuthServer provisioning | kevinawalsh/cloudproxy,William-J-Earl/cloudproxy,jlmucb/cloudproxy,cjpatton/cloudproxy,jlmucb/cloudproxy,jethrogb/cloudproxy,jethrogb/cloudproxy,William-J-Earl/cloudproxy,cjpatton/cloudproxy,tmroeder/cloudproxy,jlmucb/cloudproxy,jethrogb/cloudproxy,cjpatton/cloudproxy,tmroeder/cloudproxy,kevinawalsh/cloudproxy,jethrogb/cloudproxy,jlmucb/cloudproxy,cjpatton/cloudproxy,cjpatton/cloudproxy,jethrogb/cloudproxy,tmroeder/cloudproxy,William-J-Earl/cloudproxy,tmroeder/cloudproxy,jlmucb/cloudproxy,William-J-Earl/cloudproxy,tmroeder/cloudproxy,William-J-Earl/cloudproxy | Code/scripts/provisionAuthServer.py | Code/scripts/provisionAuthServer.py | #!/usr/bin/env python
from __future__ import print_function
from subprocess import check_call
import os
import argparse
parser = argparse.ArgumentParser(description="Generates the keys for authServer.exe")
parser.add_argument("--scriptPath", required="true", help="The path to the directory that contains the scripts used by provisionAuthServer.py")
args = parser.parse_args()
check_call([os.path.join(args.scriptPath, "createPrincipal.py"), "5", "AuthServer"])
check_call(["./cryptUtility.exe", "-EncapsulateMessage", "authServer/cert", "authServer/signingKeyMetaData", "AuthServerPrivateKey.xml", "authServer/signingKey"])
check_call(["cp", "AuthServerPublicKey.xml", "authServer/signingCert"])
| #!/usr/bin/env python
from __future__ import print_function
from subprocess import check_call
import os
import argparse
parser = argparse.ArgumentParser(description="Generates the keys for authServer.exe")
parser.add_argument("--scriptPath", required="true", help="The path to the directory that contains the scripts used by provisionAuthServer.py")
args = parser.parse_args()
check_call([os.path.join(args.scriptPath, "createPrincipal.py"), "5", "AuthServer"])
check_call(["./cryptUtility.exe", "-EncapsulateMessage", "AuthServerPublicKey.xml", "authServer/signingKeyMetaData", "AuthServerPrivateKey.xml", "authServer/signingKey"])
check_call(["cp", "AuthServerPublicKey.xml", "authServer/signingCert"])
| apache-2.0 | Python |
fe4c62acd52a4060eebf4284c15c465970ea8932 | remove duplicate enum key (#7173) | Azure/azure-sdk-for-python,Azure/azure-sdk-for-python,Azure/azure-sdk-for-python,Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/authoring/models/_luis_authoring_client_enums.py | sdk/cognitiveservices/azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/authoring/models/_luis_authoring_client_enums.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class TrainingStatus(str, Enum):
needs_training = "NeedsTraining"
in_progress = "InProgress"
trained = "Trained"
class OperationStatusType(str, Enum):
failed = "Failed"
success = "Success"
| # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class TrainingStatus(str, Enum):
needs_training = "NeedsTraining"
in_progress = "InProgress"
trained = "Trained"
class OperationStatusType(str, Enum):
failed = "Failed"
failed = "FAILED"
success = "Success"
| mit | Python |
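Why the duplicate key had to go: reusing a member name inside an Enum class body fails at class-creation time, so the old module could not even be imported on interpreters that enforce this. A minimal reproduction:
```python
from enum import Enum

try:
    class Status(str, Enum):
        failed = "Failed"
        failed = "FAILED"  # duplicate member name
except TypeError as exc:
    print(exc)  # e.g. "Attempted to reuse key: 'failed'"
```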
5f1f1145d4f01f4b30e8782d284feb44781c21ad | Use sorted on the set to parametrize tests so that pytest-xdist works | cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy | tests/cupyx_tests/scipy_tests/special_tests/test_ufunc_dispatch.py | tests/cupyx_tests/scipy_tests/special_tests/test_ufunc_dispatch.py | import numpy
import cupy
import scipy.special
import cupyx.scipy.special
from cupy import testing
import pytest
scipy_ufuncs = {
f
for f in scipy.special.__all__
if isinstance(getattr(scipy.special, f), numpy.ufunc)
}
cupyx_scipy_ufuncs = {
f
for f in dir(cupyx.scipy.special)
if isinstance(getattr(cupyx.scipy.special, f), cupy.ufunc)
}
@testing.gpu
@testing.with_requires("scipy")
@pytest.mark.parametrize("ufunc", sorted(cupyx_scipy_ufuncs & scipy_ufuncs))
class TestUfunc:
@testing.numpy_cupy_allclose(atol=1e-5)
def test_dispatch(self, xp, ufunc):
ufunc = getattr(scipy.special, ufunc)
# some ufunc (like sph_harm) do not work with float inputs
# therefore we retrieve the types from the ufunc itself
types = ufunc.types[0]
args = [
cupy.testing.shaped_random((5,), xp, dtype=types[i])
for i in range(ufunc.nargs - 1)
]
res = ufunc(*args)
assert type(res) == xp.ndarray
return res
| import numpy
import cupy
import scipy.special
import cupyx.scipy.special
from cupy import testing
import pytest
scipy_ufuncs = {
f
for f in scipy.special.__all__
if isinstance(getattr(scipy.special, f), numpy.ufunc)
}
cupyx_scipy_ufuncs = {
f
for f in dir(cupyx.scipy.special)
if isinstance(getattr(cupyx.scipy.special, f), cupy.ufunc)
}
@testing.gpu
@testing.with_requires("scipy")
@pytest.mark.parametrize("ufunc", cupyx_scipy_ufuncs & scipy_ufuncs)
class TestUfunc:
@testing.numpy_cupy_allclose(atol=1e-5)
def test_dispatch(self, xp, ufunc):
ufunc = getattr(scipy.special, ufunc)
# some ufunc (like sph_harm) do not work with float inputs
# therefore we retrieve the types from the ufunc itself
types = ufunc.types[0]
args = [
cupy.testing.shaped_random((5,), xp, dtype=types[i])
for i in range(ufunc.nargs - 1)
]
res = ufunc(*args)
assert type(res) == xp.ndarray
return res
| mit | Python |
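The reason sorted() fixes pytest-xdist: every worker must collect the same parametrize IDs in the same order, but iteration order of a Python set of strings can differ between processes because string hashing is randomized per interpreter. A minimal illustration:
```python
names = {"erf", "gamma", "ndtr"}
unstable = list(names)  # iteration order can differ between processes
stable = sorted(names)  # identical everywhere, so xdist workers agree
assert stable == ["erf", "gamma", "ndtr"]
```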
030e38a7d69bca5bbe72b51c5fbc667793ffce6b | Expand test history coverage | getsentry/zeus,getsentry/zeus,getsentry/zeus,getsentry/zeus | tests/zeus/api/resources/test_repository_tests_history_by_build.py | tests/zeus/api/resources/test_repository_tests_history_by_build.py | from zeus import factories
def test_repository_tests_history_by_build(
client,
default_login,
default_build,
default_testcase,
default_repo,
default_repo_access,
default_source,
):
build2 = factories.BuildFactory(source=default_source, finished=True)
job2 = factories.JobFactory(build=build2)
factories.TestCaseFactory(job=job2, name=default_testcase.name, failed=True)
build3 = factories.BuildFactory(source=default_source, finished=True)
job3 = factories.JobFactory(build=build3)
testcase2 = factories.TestCaseFactory(job=job3, passed=True)
build4 = factories.BuildFactory(source=default_source, finished=True)
job4 = factories.JobFactory(build=build4)
factories.TestCaseFactory(job=job4, name=default_testcase.name, passed=True)
resp = client.get(
"/api/repos/{}/tests-by-build".format(
default_repo.get_full_name(), default_testcase.hash
)
)
assert resp.status_code == 200
data = resp.json()
assert data["results"] == {
default_testcase.name: ["passed", None, "failed", "passed"],
testcase2.name: [None, "passed", None, None],
}
assert len(data["builds"]) == 4
assert data["builds"][0]["id"] == str(build4.id)
assert data["builds"][1]["id"] == str(build3.id)
assert data["builds"][2]["id"] == str(build2.id)
assert data["builds"][3]["id"] == str(default_build.id)
| from zeus import factories
def test_repository_tests_history_by_build(
client,
default_login,
default_build,
default_testcase,
default_repo,
default_repo_access,
default_source,
):
other_build = factories.BuildFactory(source=default_source, finished=True)
other_job = factories.JobFactory(build=other_build)
factories.TestCaseFactory(job=other_job, name=default_testcase.name, failed=True)
resp = client.get(
"/api/repos/{}/tests-by-build".format(
default_repo.get_full_name(), default_testcase.hash
)
)
assert resp.status_code == 200
data = resp.json()
assert data["results"] == {default_testcase.name: ["failed", "passed"]}
assert len(data["builds"]) == 2
assert data["builds"][0]["id"] == str(other_build.id)
assert data["builds"][1]["id"] == str(default_build.id)
| apache-2.0 | Python |
fabd8e5a1fbb8dd083b05b053320b090fedad119 | Fix cryptostate to no longer assign multiple states at once (issue #620) | jparyani/Mailpile,jparyani/Mailpile,jparyani/Mailpile,laborautonomo/Mailpile,laborautonomo/Mailpile,laborautonomo/Mailpile | mailpile/plugins/cryptostate.py | mailpile/plugins/cryptostate.py | from gettext import gettext as _
from mailpile.plugins import PluginManager
from mailpile.crypto.state import EncryptionInfo, SignatureInfo
_plugins = PluginManager(builtin=__file__)
##[ Keywords ]################################################################
def text_kw_extractor(index, msg, ctype, text):
kw = set()
if ('-----BEGIN PGP' in text and '\n-----END PGP' in text):
kw.add('pgp:has')
kw.add('crypto:has')
return kw
def meta_kw_extractor(index, msg_mid, msg, msg_size, msg_ts):
kw, enc, sig = set(), set(), set()
def crypto_eval(part):
# This is generic
if part.encryption_info.get('status') != 'none':
enc.add('mp_%s-%s' % ('enc', part.encryption_info['status']))
kw.add('crypto:has')
if part.signature_info.get('status') != 'none':
sig.add('mp_%s-%s' % ('sig', part.signature_info['status']))
kw.add('crypto:has')
# This is OpenPGP-specific
if (part.encryption_info.get('protocol') == 'openpgp'
or part.signature_info.get('protocol') == 'openpgp'):
kw.add('pgp:has')
# FIXME: Other encryption protocols?
def choose_one(fmt, statuses, ordering):
for o in ordering:
status = (fmt % o)
if status in statuses:
return set([status])
return set(list(statuses)[:1])
# Evaluate all the message parts
crypto_eval(msg)
for part in msg.walk():
crypto_eval(part)
# OK, we should have exactly one encryption state...
if len(enc) < 1:
enc.add('mp_enc-none')
elif len(enc) > 1:
enc = choose_one('mp_enc-%s', enc, EncryptionInfo.STATUSES)
# ... and exactly one signature state.
if len(sig) < 1:
sig.add('mp_sig-none')
elif len(sig) > 1:
sig = choose_one('mp_sig-%s', sig, SignatureInfo.STATUSES)
# Emit tags for our states
for tname in (enc | sig):
tag = index.config.get_tags(slug=tname)
if tag:
kw.add('%s:in' % tag[0]._key)
return list(kw)
_plugins.register_text_kw_extractor('crypto_tkwe', text_kw_extractor)
_plugins.register_meta_kw_extractor('crypto_mkwe', meta_kw_extractor)
##[ Search helpers ]##########################################################
def search(config, idx, term, hits):
#
# FIXME: Translate things like pgp:signed into a search for all the
# tags that have signatures (good or bad).
#
return []
_plugins.register_search_term('crypto', search)
_plugins.register_search_term('pgp', search)
| from gettext import gettext as _
from mailpile.plugins import PluginManager
_plugins = PluginManager(builtin=__file__)
##[ Keywords ]################################################################
def text_kw_extractor(index, msg, ctype, text):
kw = set()
if ('-----BEGIN PGP' in text and '\n-----END PGP' in text):
kw.add('pgp:has')
kw.add('crypto:has')
return kw
def meta_kw_extractor(index, msg_mid, msg, msg_size, msg_ts):
kw, enc, sig = set(), set(), set()
for part in msg.walk():
enc.add('mp_%s-%s' % ('enc', part.encryption_info['status']))
sig.add('mp_%s-%s' % ('sig', part.signature_info['status']))
# This is generic
if (part.encryption_info.get('status') != 'none'
or part.signature_info.get('status') != 'none'):
kw.add('crypto:has')
# This is OpenPGP-specific
if (part.encryption_info.get('protocol') == 'openpgp'
or part.signature_info.get('protocol') == 'openpgp'):
kw.add('pgp:has')
# FIXME: Other encryption protocols?
for tname in (enc | sig):
tag = index.config.get_tags(slug=tname)
if tag:
kw.add('%s:in' % tag[0]._key)
return list(kw)
_plugins.register_text_kw_extractor('crypto_tkwe', text_kw_extractor)
_plugins.register_meta_kw_extractor('crypto_mkwe', meta_kw_extractor)
##[ Search helpers ]##########################################################
def search(config, idx, term, hits):
#
# FIXME: Translate things like pgp:signed into a search for all the
# tags that have signatures (good or bad).
#
return []
_plugins.register_search_term('crypto', search)
_plugins.register_search_term('pgp', search)
| apache-2.0 | Python |
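The choose_one helper above is what collapses conflicting part states into a single tag; here it is in isolation with an assumed precedence ordering (Mailpile's actual STATUSES tuple is not shown in this diff):
```python
def choose_one(fmt, statuses, ordering):
    for o in ordering:
        status = fmt % o
        if status in statuses:
            return set([status])
    return set(list(statuses)[:1])

ordering = ("error", "invalid", "decrypted", "none")  # assumed precedence
print(choose_one("mp_enc-%s", {"mp_enc-none", "mp_enc-decrypted"}, ordering))
# -> {'mp_enc-decrypted'}: the higher-precedence state wins
```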
d5c30bdae34450b4052f83f773ef993e89fc8bef | Prepare 1.1 release | nr-plugins/c4ddev,nr-plugins/c4ddev | c4ddev.pyp | c4ddev.pyp | # Copyright (C) 2014-2016 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = 'Niklas Rosenstein <[email protected]>'
__version__ = '1.1'
import os
import sys
import c4d
_added_paths = []
def add_path(path, module=sys):
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(__file__), path)
if path not in module.path:
module.path.append(path)
_added_paths.append((module, path))
# The third party modules in this plugin should be available globally.
add_path('lib/py-shroud')
add_path('lib/requests')
import shroud
add_path('lib', module=shroud)
add_path('lib/py-localimport', module=shroud)
def load_extensions():
extensions = []
ext_dir = os.path.join(os.path.dirname(__file__), 'ext')
for file in os.listdir(ext_dir):
if file.endswith('.py'):
extensions.append(shroud.require(os.path.join(ext_dir, file)))
return extensions
extensions = load_extensions()
def PluginMessage(msg_type, data):
if msg_type == c4d.C4DPL_RELOADPYTHONPLUGINS:
for mod, path in _added_paths:
try: mod.path.remove(path)
except ValueError: pass
for extension in extensions:
if hasattr(extension, 'PluginMessage'):
extension.PluginMessage(msg_type, data)
return True
| # Copyright (C) 2014-2016 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = 'Niklas Rosenstein <[email protected]>'
__version__ = '1.0'
import os
import sys
import c4d
_added_paths = []
def add_path(path, module=sys):
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(__file__), path)
if path not in module.path:
module.path.append(path)
_added_paths.append((module, path))
# The third party modules in this plugin should be available globally.
add_path('lib/py-shroud')
add_path('lib/requests')
import shroud
add_path('lib', module=shroud)
add_path('lib/py-localimport', module=shroud)
def load_extensions():
extensions = []
ext_dir = os.path.join(os.path.dirname(__file__), 'ext')
for file in os.listdir(ext_dir):
if file.endswith('.py'):
extensions.append(shroud.require(os.path.join(ext_dir, file)))
return extensions
extensions = load_extensions()
def PluginMessage(msg_type, data):
if msg_type == c4d.C4DPL_RELOADPYTHONPLUGINS:
for mod, path in _added_paths:
try: mod.path.remove(path)
except ValueError: pass
for extension in extensions:
if hasattr(extension, 'PluginMessage'):
extension.PluginMessage(msg_type, data)
return True
| mit | Python |
8c0e1a976e6341d565140725d51562cc9021f90e | add hostname to all messages | markokr/cc,markokr/cc | cc/reqs.py | cc/reqs.py | import time
from cc.json import Struct, Field
from cc.message import CCMessage
from socket import gethostname
__all__ = ['LogMessage', 'InfofileMessage', 'JobRequestMessage', 'JobConfigReplyMessage', 'TaskRegisterMessage', 'TaskSendMessage']
class BaseMessage(Struct):
req = Field(str)
hostname = Field(str, default = gethostname())
def send_to(self, sock):
cmsg = CCMessage(jmsg = self)
sock.send_multipart(cmsg.zmsg)
class LogMessage(BaseMessage):
"log.*"
level = Field(str)
service_type = Field(str)
job_name = Field(str)
msg = Field(str)
time = Field(float)
pid = Field(int)
line = Field(int)
function = Field(str)
class InfofileMessage(BaseMessage):
"pub.infofile"
mtime = Field(float)
filename = Field(str)
body = Field(str)
class JobConfigRequestMessage(BaseMessage):
"job.config"
job_name = Field(str)
class JobConfigReplyMessage(BaseMessage):
"job.config"
job_name = Field(str)
config = Field(dict)
class TaskRegisterMessage(BaseMessage):
"req.task.register"
host = Field(str)
class TaskSendMessage(BaseMessage):
"req.task.send"
host = Field(str)
def parse_json(js):
return Struct.from_json(js)
| import time
from cc.json import Struct, Field
from cc.message import CCMessage
__all__ = ['LogMessage', 'InfofileMessage', 'JobRequestMessage', 'JobConfigReplyMessage', 'TaskRegisterMessage', 'TaskSendMessage']
class BaseMessage(Struct):
req = Field(str)
def send_to(self, sock):
cmsg = CCMessage(jmsg = self)
sock.send_multipart(cmsg.zmsg)
class LogMessage(BaseMessage):
"log.*"
level = Field(str)
service_type = Field(str)
job_name = Field(str)
msg = Field(str)
time = Field(float)
pid = Field(int)
line = Field(int)
function = Field(str)
class InfofileMessage(BaseMessage):
"pub.infofile"
mtime = Field(float)
filename = Field(str)
body = Field(str)
class JobConfigRequestMessage(BaseMessage):
"job.config"
job_name = Field(str)
class JobConfigReplyMessage(BaseMessage):
"job.config"
job_name = Field(str)
config = Field(dict)
class TaskRegisterMessage(BaseMessage):
"req.task.register"
host = Field(str)
class TaskSendMessage(BaseMessage):
"req.task.send"
host = Field(str)
def parse_json(js):
return Struct.from_json(js)
| bsd-2-clause | Python |
89d9328696a01e70428fccfa890d847e91f5f5c4 | Fix copy-paste bug | nanonyme/syscertifi | certifi.py | certifi.py | import platform
if platform.system() == "Windows":
import wincertstore
import atexit
import ssl
certfile = wincertstore.CertFile()
certfile.addstore("CA")
certfile.addstore("ROOT")
atexit.register(certfile.close) # cleanup and remove files on shutdown
def where():
return certfile
else:
import ssl
def where():
return ssl.get_default_verify_paths().openssl_cafile
| import platform
if platform.system() == "Windows":
import wincertstore
import atexit
import ssl
certfile = wincertstore.CertFile()
certfile.addstore("CA")
certfile.addstore("ROOT")
atexit.register(certfile.close) # cleanup and remove files on shutdown
def where():
return certfile
else:
import ssl
def where():
return ssl.ssl.get_default_verify_paths().openssl_cafile | isc | Python |
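The fix is one stray "ssl." removed; at the call site the corrected non-Windows branch simply delegates to the standard library:
```python
import ssl

paths = ssl.get_default_verify_paths()
print(paths.openssl_cafile)  # e.g. /usr/lib/ssl/cert.pem, system dependent
```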
57d9d4fe1b46d9dd45629dc5fc461c0b8c51c5ec | Fix music helper | Blindle/Raspberry | src/helpers/musicHelper.py | src/helpers/musicHelper.py | import pygame
import os
import sys
sys.path.append(os.path.dirname(__file__) + "/../audios/letters")
pygame.mixer.init()
def play_word(word):
for letter in word:
_play_letter(letter)
def _play_letter(letter):
pygame.mixer.music.load("audios/letters/" + letter.lower() + ".mp3")
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
| #import pygame
import os
import sys
import pyglet
sys.path.append(os.path.dirname(__file__) + "/../audios/letters")
pyglet.options['audio'] = ('openal', 'pulse', 'silent')
player = pyglet.media.Player()
#pygame.mixer.init()
def play_file(file_path):
pass
#pygame.mixer.music.load(file_path)
# playAudioLoaded()
def play_word(word):
#CHANNEL.stop()
# pygame.mixer.music.load(os.path.dirname(__file__) + "/../audios/letters/a.mp3")
# pygame.mixer.music.play()
# pygame.mixer.music.queue(os.path.dirname(__file__) + "/../audios/letters/e.mp3")
# pygame.mixer.music.stop()
first = True
for letter in word:
path = str(os.path.dirname(__file__) + "/../audios/letters/" + letter.lower() + ".mp3")
src = pyglet.media.load(path, streaming=False)
player.queue(src)
# if first:
# first = False
# pygame.mixer.music.load(os.path.dirname(__file__) + "/../audios/letters/" + letter.lower() + ".mp3")
#pygame.mixer.music.play()
# else:
# pygame.mixer.music.queue(os.path.dirname(__file__) + "/../audios/letters/" + letter.lower() + ".mp3")
#_play_letter(letter)
# pygame.mixer.music.play()
player.play()
def _play_letter(letter):
pass
#pygame.mixer.music.load("audios/letters/" + letter.lower() + ".mp3")
#pygame.mixer.music.play()
#while pygame.mixer.music.get_busy() == True:
# continue
#def playAudioLoaded():
| mit | Python |
38d96e4ddbe44af8f028dfb29eca17dc8ecd478d | test case for clean module | prem2014/html2latex,prem2014/html2latex,psjinx/html2latex,psjinx/html2latex | src/html2latex/__init__.py | src/html2latex/__init__.py | from .html2latex import html2latex
html2latex
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
| from .html2latex import html2latex
html2latex
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
| mit | Python |
46f15a00d2324da4b9f12c9168ddda8dddb1b607 | use notebook-style for plot_logistic_path.py (#22536) | saiwing-yeung/scikit-learn,anntzer/scikit-learn,AlexandreAbraham/scikit-learn,lesteve/scikit-learn,ivannz/scikit-learn,scikit-learn/scikit-learn,TomDLT/scikit-learn,scikit-learn/scikit-learn,espg/scikit-learn,AlexandreAbraham/scikit-learn,scikit-learn/scikit-learn,betatim/scikit-learn,saiwing-yeung/scikit-learn,lesteve/scikit-learn,manhhomienbienthuy/scikit-learn,AlexandreAbraham/scikit-learn,jakirkham/scikit-learn,saiwing-yeung/scikit-learn,saiwing-yeung/scikit-learn,betatim/scikit-learn,anntzer/scikit-learn,vinayak-mehta/scikit-learn,jakirkham/scikit-learn,anntzer/scikit-learn,betatim/scikit-learn,ivannz/scikit-learn,jakirkham/scikit-learn,espg/scikit-learn,betatim/scikit-learn,ivannz/scikit-learn,vinayak-mehta/scikit-learn,manhhomienbienthuy/scikit-learn,anntzer/scikit-learn,AlexandreAbraham/scikit-learn,lesteve/scikit-learn,ivannz/scikit-learn,vinayak-mehta/scikit-learn,TomDLT/scikit-learn,TomDLT/scikit-learn,vinayak-mehta/scikit-learn,espg/scikit-learn,espg/scikit-learn,manhhomienbienthuy/scikit-learn,jakirkham/scikit-learn,manhhomienbienthuy/scikit-learn,scikit-learn/scikit-learn,TomDLT/scikit-learn,lesteve/scikit-learn | examples/linear_model/plot_logistic_path.py | examples/linear_model/plot_logistic_path.py | """
==============================================
Regularization path of L1- Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least regularized. The 4
coefficients of the models are collected and plotted as a "regularization
path": on the left-hand side of the figure (strong regularizers), all the
coefficients are exactly 0. When regularization gets progressively looser,
coefficients can get non-zero values one after the other.
Here we choose the liblinear solver because it can efficiently optimize for the
Logistic Regression loss with a non-smooth, sparsity inducing l1 penalty.
Also note that we set a low value for the tolerance to make sure that the model
has converged before collecting the coefficients.
We also use warm_start=True which means that the coefficients of the models are
reused to initialize the next model fit to speed-up the computation of the
full-path.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
# %%
# Load data
# ---------
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X /= X.max() # Normalize X to speed-up convergence
# %%
# Compute regularization path
# ---------------------------
import numpy as np
from sklearn import linear_model
from sklearn.svm import l1_min_c
cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16)
clf = linear_model.LogisticRegression(
penalty="l1",
solver="liblinear",
tol=1e-6,
max_iter=int(1e6),
warm_start=True,
intercept_scaling=10000.0,
)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
coefs_ = np.array(coefs_)
# %%
# Plot regularization path
# ------------------------
import matplotlib.pyplot as plt
plt.plot(np.log10(cs), coefs_, marker="o")
ymin, ymax = plt.ylim()
plt.xlabel("log(C)")
plt.ylabel("Coefficients")
plt.title("Logistic Regression Path")
plt.axis("tight")
plt.show()
| """
==============================================
Regularization path of L1- Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least regularized. The 4
coefficients of the models are collected and plotted as a "regularization
path": on the left-hand side of the figure (strong regularizers), all the
coefficients are exactly 0. When regularization gets progressively looser,
coefficients can get non-zero values one after the other.
Here we choose the liblinear solver because it can efficiently optimize for the
Logistic Regression loss with a non-smooth, sparsity inducing l1 penalty.
Also note that we set a low value for the tolerance to make sure that the model
has converged before collecting the coefficients.
We also use warm_start=True which means that the coefficients of the models are
reused to initialize the next model fit to speed-up the computation of the
full-path.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X /= X.max() # Normalize X to speed-up convergence
# #############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16)
print("Computing regularization path ...")
start = time()
clf = linear_model.LogisticRegression(
penalty="l1",
solver="liblinear",
tol=1e-6,
max_iter=int(1e6),
warm_start=True,
intercept_scaling=10000.0,
)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took %0.3fs" % (time() - start))
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_, marker="o")
ymin, ymax = plt.ylim()
plt.xlabel("log(C)")
plt.ylabel("Coefficients")
plt.title("Logistic Regression Path")
plt.axis("tight")
plt.show()
| bsd-3-clause | Python |
fd6899578bc8e6149921998d42be383f21adbe9a | add plots | cjekel/piecewise_linear_fit_py,cjekel/piecewise_linear_fit_py,cjekel/piecewiseLinearFitPython | examples/tf/bench_run_times/plot_results.py | examples/tf/bench_run_times/plot_results.py | import numpy as np
import matplotlib.pyplot as plt
# factor for 90% coverage with 90% confidence using Normal distribution
# with 10 samples from table XII in [1]
# [1] Montgomery, D. C., & Runger, G. C. (2014). Applied statistics and
# probability for engineers. Sixth edition. John Wiley & Sons.
k = 2.535
amd_fx_run_times = np.load('amd_fx_8350_titanXP/6_break_times.npy')
n = np.load('amd_fx_8350_titanXP/n.npy')
amd_fx_run_times_means = amd_fx_run_times.mean(axis=2)
amd_fx_run_times_stds = amd_fx_run_times.std(axis=2, ddof=1)
plt.figure()
plt.grid()
plt.errorbar(n, amd_fx_run_times_means[0], yerr=k*amd_fx_run_times_stds[0], capsize=2.0, label='Numpy')
plt.errorbar(n, amd_fx_run_times_means[1], yerr=k*amd_fx_run_times_stds[1], capsize=2.0, label='TF GPU')
plt.xlabel('Number of data points')
plt.ylabel('Run time (seconds, Lower is better)')
plt.semilogx()
plt.figure()
plt.grid()
plt.errorbar(n[1:], amd_fx_run_times_means[0,1:] - amd_fx_run_times_means[0,1:], yerr=(k*amd_fx_run_times_stds[0,1:]), capsize=2.0, label='Numpy')
plt.errorbar(n[1:], amd_fx_run_times_means[1,1:] - amd_fx_run_times_means[0,1:], yerr=(k*amd_fx_run_times_stds[1,1:]), capsize=2.0, label='TF GPU')
plt.xlabel('Number of data points')
plt.ylabel('Run time difference (Lower is better)')
plt.semilogx()
plt.figure()
plt.grid()
plt.errorbar(n[1:], amd_fx_run_times_means[0,1:]/amd_fx_run_times_means[0,1:], yerr=(k*amd_fx_run_times_stds[0,1:])/amd_fx_run_times_means[0,1:], capsize=2.0, label='Numpy')
plt.errorbar(n[1:], amd_fx_run_times_means[1,1:]/amd_fx_run_times_means[0,1:], yerr=(k*amd_fx_run_times_stds[1,1:])/amd_fx_run_times_means[0,1:], capsize=2.0, label='TF GPU')
plt.xlabel('Number of data points')
plt.ylabel('Run time relative to Numpy (Lower is better)')
plt.semilogx()
plt.figure()
plt.grid()
plt.errorbar(n[1:], amd_fx_run_times_means[0,1:]/amd_fx_run_times_means[1,1:], yerr=(k*amd_fx_run_times_stds[0,1:])/amd_fx_run_times_means[1,1:], capsize=2.0, label='Numpy')
plt.errorbar(n[1:], amd_fx_run_times_means[1,1:]/amd_fx_run_times_means[1,1:], yerr=(k*amd_fx_run_times_stds[1,1:])/amd_fx_run_times_means[1,1:], capsize=2.0, label='TF GPU')
plt.xlabel('Number of data points')
plt.ylabel('Run time relative to TF GPU (Lower is better)')
plt.semilogx()
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
amd_fx_run_times = np.load('amd_fx_8350_titanXP/6_break_times.npy')
n = np.load('amd_fx_8350_titanXP/n.npy') | mit | Python |
064802e0354cd9d27a7ea0d1c69a45baf0587c63 | add pool example | mylokin/redisext,mylokin/redisext | redisext/pool.py | redisext/pool.py | '''
Pool
----
.. autoclass:: Pool
:members:
The simplest example of pool usage is a token pool::
class TokenPool(Connection, redisext.pool.Pool):
SERIALIZER = redisext.serializer.String
and this pool could be used like::
>>> facebook = TokenPool('facebook')
>>> facebook.push('fb1')
True
>>> facebook.push('fb1')
False
>>> facebook.push('fb2')
True
>>> facebook.pop()
u'fb1'
>>> facebook.pop()
u'fb2'
>>> facebook.pop()
>>>
SortedSet
---------
For your spectial needs check :class:`redisext.pool.SortedSet`.
'''
from __future__ import absolute_import
import redisext.models.abc
class Pool(redisext.models.abc.Model):
def pop(self):
'''
Pop item from pool.
:returns: obviously item
:rtype: how knows(serializer knows)
'''
item = self.connect_to_master().spop(self.key)
return self.decode(item)
def push(self, item):
'''
Place item into pool.
:param item: whatever you need to place into pool
:rtype: bool
'''
item = self.encode(item)
return bool(self.connect_to_master().sadd(self.key, item))
class SortedSet(redisext.models.abc.Model):
def add(self, element, score):
element = self.encode(element)
return bool(self.connect_to_master().zadd(self.key, score, element))
def length(self, start_score, end_score):
return int(self.connect_to_slave().zcount(self.key, start_score, end_score))
def members(self):
elements = self.connect_to_slave().zrevrange(self.key, 0, -1)
if not elements:
return elements
return [self.decode(e) for e in elements]
def contains(self, element):
element = self.encode(element)
return self.connect_to_slave().zscore(self.key, element) is not None
def truncate(self, size):
return int(self.connect_to_master().zremrangebyrank(self.key, 0, -1 * size - 1))
def clean(self):
return bool(self.connect_to_master().delete(self.key))
| '''
Pool
^^^^
.. autoclass:: Pool
:members:
SortedSet
^^^^^^^^^
.. autoclass:: SortedSet
:members:
'''
from __future__ import absolute_import
import redisext.models.abc
class Pool(redisext.models.abc.Model):
def pop(self):
item = self.connect_to_master().spop(self.key)
return self.decode(item)
def push(self, item):
item = self.encode(item)
return bool(self.connect_to_master().sadd(self.key, item))
class SortedSet(redisext.models.abc.Model):
def add(self, element, score):
element = self.encode(element)
return bool(self.connect_to_master().zadd(self.key, score, element))
def length(self, start_score, end_score):
return int(self.connect_to_slave().zcount(self.key, start_score, end_score))
def members(self):
elements = self.connect_to_slave().zrevrange(self.key, 0, -1)
if not elements:
return elements
return [self.decode(e) for e in elements]
def contains(self, element):
element = self.encode(element)
return self.connect_to_slave().zscore(self.key, element) is not None
def truncate(self, size):
return int(self.connect_to_master().zremrangebyrank(self.key, 0, -1 * size - 1))
def clean(self):
return bool(self.connect_to_master().delete(self.key))
| mit | Python |
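The docstring above points at SortedSet without demonstrating it; a hedged usage sketch in the same style as the module's own Pool example (the Connection mixin, serializer, and key name are assumptions for illustration):
```python
class ScoreBoard(Connection, redisext.pool.SortedSet):
    SERIALIZER = redisext.serializer.String

board = ScoreBoard('game')
board.add('alice', 10)
board.add('bob', 25)
board.members()          # ['bob', 'alice'], highest score first (zrevrange)
board.truncate(1)        # keep only the top-ranked entry
board.contains('alice')  # False once truncated away
```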
398496ccfbad5e4452192663a6daa49f11ee0f59 | Hide 'yield from' code from py.test | Jc2k/libcloudcore | libcloudcore/tests/test_asyncio_drivers.py | libcloudcore/tests/test_asyncio_drivers.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
import sys
import pytest
from libcloudcore.driver import Driver
from libcloudcore.auth.basic_auth import BasicAuth
from libcloudcore.validation import Validation
from libcloudcore.serializers import JsonSerializer
from libcloudcore.layer import Layer
@pytest.mark.skipif(sys.version_info < (3,3), reason="requires python3.3")
class TestDriver(unittest.TestCase):
def setUp(self):
from libcloudcore.asyncio.drivers.bigv import Driver
self.Driver = Driver
self.driver = Driver('username', 'password')
self.model = self.driver.model
self.operation = self.model.get_operation("list_virtual_machines")
def test_mro(self):
from libcloudcore.asyncio.backend import Driver as AsnycioBackend
self.assertEqual(inspect.getmro(self.Driver), (
self.Driver,
Driver,
AsnycioBackend,
Validation,
BasicAuth,
JsonSerializer,
Layer,
object,
))
| # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
from libcloudcore.driver import Driver
from libcloudcore.auth.basic_auth import BasicAuth
from libcloudcore.asyncio.backend import Driver as AsnycioBackend
from libcloudcore.validation import Validation
from libcloudcore.serializers import JsonSerializer
from libcloudcore.layer import Layer
class TestDriver(unittest.TestCase):
def setUp(self):
from libcloudcore.asyncio.drivers.bigv import Driver
self.Driver = Driver
self.driver = Driver('username', 'password')
self.model = self.driver.model
self.operation = self.model.get_operation("list_virtual_machines")
def test_mro(self):
self.assertEqual(inspect.getmro(self.Driver), (
self.Driver,
Driver,
AsnycioBackend,
Validation,
BasicAuth,
JsonSerializer,
Layer,
object,
))
| apache-2.0 | Python |
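The pattern the commit applies, shown in isolation: keep modules containing py3-only syntax (such as 'yield from') out of module-level imports so py.test can still collect the file on older interpreters, and gate the class with skipif. A minimal sketch under that assumption:
```python
import sys
import unittest
import pytest

@pytest.mark.skipif(sys.version_info < (3, 3), reason="requires python3.3")
class TestAsyncOnly(unittest.TestCase):
    def setUp(self):
        # deferred import: at module level this would make collection fail
        # on interpreters that cannot parse the py3-only syntax
        import asyncio
        self.loop = asyncio.new_event_loop()

    def test_loop_not_running(self):
        self.assertFalse(self.loop.is_running())
```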
e5ed6ef0c201d9a29c5934e3687abec7e13ae551 | update models to use hashids for naming files | gitgik/updown | api/models.py | api/models.py | """
This file represents the models for the api app.
"""
from django.db import models
from .utils import get_file_upload_path, generate_uid
class DateMixin(models.Model):
"""A model mixin for date creation."""
created = models.DateField(auto_now_add=True)
class File(DateMixin):
"""This class represents the file model."""
file_id = models.CharField(default=generate_uid, max_length=50)
_file = models.FileField(upload_to=get_file_upload_path)
def __str__(self):
"""Return a string representation of the model instance."""
return "{}".format(self.name)
| """
This file represents the models for the api app.
"""
from django.db import models
class DateMixin(models.Model):
"""A model mixin for date creation."""
created = models.DateField(auto_now_add=True)
class File(DateMixin):
"""This class represents the file model."""
name = models.CharField(max_length=100, unique=True)
file = models.FileField(allow_files=True)
def __str__(self):
"""Return a string representation of the model instance."""
return "{}".format(self.name)
| mit | Python |
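The helpers imported from .utils are not part of this diff; below is a plausible hashids-based sketch of what they might look like, where every name, salt, and path detail is hypothetical:
```python
import os
from hashids import Hashids

_hashids = Hashids(salt="updown", min_length=8)  # salt is a made-up value

def generate_uid():
    """Encode a random integer into a short, URL-safe hashid."""
    return _hashids.encode(int.from_bytes(os.urandom(4), "big"))

def get_file_upload_path(instance, filename):
    """Store each upload under its hashid, keeping the extension."""
    _, ext = os.path.splitext(filename)
    return "uploads/{}{}".format(instance.file_id, ext)
```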
6e2515f4db3b6b9913e252cd52237574002637f2 | Add missing user_id in revoke_certs_by_user_and_project() | jianghuaw/nova,petrutlucian94/nova_dev,devendermishrajio/nova,CloudServer/nova,whitepages/nova,gooddata/openstack-nova,mikalstill/nova,openstack/nova,rahulunair/nova,dims/nova,jianghuaw/nova,ted-gould/nova,gspilio/nova,thomasem/nova,CiscoSystems/nova,jeffrey4l/nova,cloudbau/nova,akash1808/nova_test_latest,citrix-openstack-build/nova,Yusuke1987/openstack_template,double12gzh/nova,zhimin711/nova,zhimin711/nova,cernops/nova,klmitch/nova,leilihh/nova,alaski/nova,maoy/zknova,kimjaejoong/nova,tanglei528/nova,devendermishrajio/nova_test_latest,plumgrid/plumgrid-nova,rahulunair/nova,watonyweng/nova,spring-week-topos/nova-week,vladikr/nova_drafts,houshengbo/nova_vmware_compute_driver,scripnichenko/nova,JioCloud/nova,jeffrey4l/nova,cloudbau/nova,tudorvio/nova,redhat-openstack/nova,tangfeixiong/nova,bigswitch/nova,jianghuaw/nova,eonpatapon/nova,virtualopensystems/nova,fajoy/nova,LoHChina/nova,ted-gould/nova,sridevikoushik31/openstack,DirectXMan12/nova-hacking,luogangyi/bcec-nova,tanglei528/nova,iuliat/nova,nikesh-mahalka/nova,alexandrucoman/vbox-nova-driver,angdraug/nova,eayunstack/nova,SUSE-Cloud/nova,TwinkleChawla/nova,eonpatapon/nova,jianghuaw/nova,vladikr/nova_drafts,Tehsmash/nova,Juniper/nova,gspilio/nova,thomasem/nova,CiscoSystems/nova,alvarolopez/nova,ntt-sic/nova,alaski/nova,virtualopensystems/nova,CloudServer/nova,bigswitch/nova,jianghuaw/nova,Metaswitch/calico-nova,Brocade-OpenSource/OpenStack-DNRM-Nova,SUSE-Cloud/nova,TieWei/nova,paulmathews/nova,rajalokan/nova,alaski/nova,zaina/nova,devoid/nova,sebrandon1/nova,tealover/nova,NewpTone/stacklab-nova,maheshp/novatest,leilihh/novaha,varunarya10/nova_test_latest,dstroppa/openstack-smartos-nova-grizzly,vmturbo/nova,qwefi/nova,mahak/nova,cloudbau/nova,yrobla/nova,houshengbo/nova_vmware_compute_driver,maoy/zknova,JioCloud/nova_test_latest,gooddata/openstack-nova,apporc/nova,alvarolopez/nova,luogangyi/bcec-nova,JianyuWang/nova,usc-isi/nova,cyx1231st/nova,klmitch/nova,CCI-MOC/nova,DirectXMan12/nova-hacking,mikalstill/nova,rajalokan/nova,maheshp/novatest,Juniper/nova,yosshy/nova,LoHChina/nova,usc-isi/nova,Francis-Liu/animated-broccoli,Yusuke1987/openstack_template,badock/nova,blueboxgroup/nova,akash1808/nova,paulmathews/nova,imsplitbit/nova,sridevikoushik31/nova,sebrandon1/nova,silenceli/nova,shahar-stratoscale/nova,vmturbo/nova,Juniper/nova,mandeepdhami/nova,fajoy/nova,raildo/nova,BeyondTheClouds/nova,mandeepdhami/nova,jianghuaw/nova,jeffrey4l/nova,savi-dev/nova,rajalokan/nova,leilihh/nova,alaski/nova,zzicewind/nova,whitepages/nova,Triv90/Nova,hanlind/nova,CCI-MOC/nova,yatinkumbhare/openstack-nova,vmturbo/nova,JianyuWang/nova,fnordahl/nova,NeCTAR-RC/nova,Stavitsky/nova,j-carpentier/nova,TwinkleChawla/nova,badock/nova,zaina/nova,devoid/nova,sacharya/nova,yrobla/nova,Triv90/Nova,hanlind/nova,usc-isi/nova,shahar-stratoscale/nova,tealover/nova,NewpTone/stacklab-nova,cloudbase/nova,NeCTAR-RC/nova,Stavitsky/nova,j-carpentier/nova,maheshp/novatest,leilihh/novaha,varunarya10/nova_test_latest,dstroppa/openstack-smartos-nova-grizzly,vmturbo/nova,qwefi/nova,mahak/nova,cloudbau/nova,yrobla/nova,houshengbo/nova_vmware_compute_driver,maoy/zknova,JioCloud/nova_test_latest,gooddata/openstack-nova,apporc/nova,alvarolopez/nova,luogangyi/bcec-nova,JianyuWang/nova,usc-isi/nova,cyx1231st/nova,klmitch/nova,CCI-MOC/nova,DirectXMan12/nova-hacking,mikalstill/nova,rajalokan/nova,maheshp/novatest,Juniper/nova,yosshy/nova,LoHChina/nova,usc-isi/nova,Francis-Liu/animated-broccoli,Yusuke1987/openstack_template,badock/nova,blueboxgroup/nova,akash1808/nova,paulmathews/nova,imsplitbit/nova,sridevikoushik31/nova,sebrandon1/nova,silenceli/nova,shahar-stratoscale/nova,vmturbo/nova,Juniper/nova,mandeepdhami/nova,fajoy/nova,raildo/nova,BeyondTheClouds/nova,mandeepdhami/nova,jianghuaw/nova,jeffrey4l/nova,savi-dev/nova,rajalokan/nova,sridevikoushik31/nova,berrange/nova,barnsnake351/nova,zzicewind/nova,imsplitbit/nova,petrutlucian94/nova_dev,mahak/nova,dstroppa/openstack-smartos-nova-grizzly,OpenAcademy-OpenStack/nova-scheduler,yosshy/nova,aristanetworks/arista-ovs-nova,shootstar/novatest,barnsnake351/nova | nova/cert/manager.py | nova/cert/manager.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cert manager manages x509 certificates.
**Related Flags**
:cert_topic: What :mod:`rpc` topic to listen to (default: `cert`).
:cert_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`nova.cert.manager.Manager`).
"""
import base64
from nova import crypto
from nova import flags
from nova import manager
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class CertManager(manager.Manager):
RPC_API_VERSION = '1.0'
def init_host(self):
crypto.ensure_ca_filesystem()
def revoke_certs_by_user(self, context, user_id):
"""Revoke all user certs."""
return crypto.revoke_certs_by_user(user_id)
def revoke_certs_by_project(self, context, project_id):
"""Revoke all project certs."""
return crypto.revoke_certs_by_project(project_id)
def revoke_certs_by_user_and_project(self, context, user_id, project_id):
"""Revoke certs for user in project."""
return crypto.revoke_certs_by_user_and_project(user_id, project_id)
def generate_x509_cert(self, context, user_id, project_id):
"""Generate and sign a cert for user in project"""
return crypto.generate_x509_cert(user_id, project_id)
def fetch_ca(self, context, project_id):
"""Get root ca for a project"""
return crypto.fetch_ca(project_id)
def fetch_crl(self, context, project_id):
"""Get crl for a project"""
return crypto.fetch_crl(project_id)
def decrypt_text(self, context, project_id, text):
"""Decrypt base64 encoded text using the projects private key."""
return crypto.decrypt_text(project_id, base64.b64decode(text))
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cert manager manages x509 certificates.
**Related Flags**
:cert_topic: What :mod:`rpc` topic to listen to (default: `cert`).
:cert_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`nova.cert.manager.Manager`).
"""
import base64
from nova import crypto
from nova import flags
from nova import manager
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class CertManager(manager.Manager):
RPC_API_VERSION = '1.0'
def init_host(self):
crypto.ensure_ca_filesystem()
def revoke_certs_by_user(self, context, user_id):
"""Revoke all user certs."""
return crypto.revoke_certs_by_user(user_id)
def revoke_certs_by_project(self, context, project_id):
"""Revoke all project certs."""
return crypto.revoke_certs_by_project(project_id)
def revoke_certs_by_user_and_project(self, context, user_id, project_id):
"""Revoke certs for user in project."""
return crypto.revoke_certs_by_user_and_project(project_id)
def generate_x509_cert(self, context, user_id, project_id):
"""Generate and sign a cert for user in project"""
return crypto.generate_x509_cert(user_id, project_id)
def fetch_ca(self, context, project_id):
"""Get root ca for a project"""
return crypto.fetch_ca(project_id)
def fetch_crl(self, context, project_id):
"""Get crl for a project"""
return crypto.fetch_crl(project_id)
def decrypt_text(self, context, project_id, text):
"""Decrypt base64 encoded text using the projects private key."""
return crypto.decrypt_text(project_id, base64.b64decode(text))
| apache-2.0 | Python |
b45ce22e0d688e5c2a9a56f5eb87744cea87a263 | Fix scimath.power for negative integer input. | teoliphant/numpy-refactor,efiring/numpy-work,Ademan/NumPy-GSoC,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,illume/numpy3k,teoliphant/numpy-refactor,Ademan/NumPy-GSoC,jasonmccampbell/numpy-refactor-sprint,jasonmccampbell/numpy-refactor-sprint,teoliphant/numpy-refactor,efiring/numpy-work,teoliphant/numpy-refactor,illume/numpy3k,Ademan/NumPy-GSoC,chadnetzer/numpy-gaurdro,illume/numpy3k,jasonmccampbell/numpy-refactor-sprint,jasonmccampbell/numpy-refactor-sprint,illume/numpy3k,efiring/numpy-work,chadnetzer/numpy-gaurdro,efiring/numpy-work,chadnetzer/numpy-gaurdro,chadnetzer/numpy-gaurdro | numpy/lib/scimath.py | numpy/lib/scimath.py | """
Wrapper functions to more user-friendly calling of certain math functions
whose output data-type is different than the input data-type in certain
domains of the input.
"""
__all__ = ['sqrt', 'log', 'log2', 'logn','log10', 'power', 'arccos',
'arcsin', 'arctanh']
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
from numpy.lib.type_check import isreal
#__all__.extend([key for key in dir(nx.umath)
# if key[0] != '_' and key not in __all__])
_ln2 = nx.log(2.0)
def _tocomplex(arr):
if isinstance(arr.dtype, (nt.single, nt.byte, nt.short, nt.ubyte,
nt.ushort)):
return arr.astype(nt.csingle)
else:
return arr.astype(nt.cdouble)
def _fix_real_lt_zero(x):
x = asarray(x)
if any(isreal(x) & (x<0)):
x = _tocomplex(x)
return x
def _fix_int_lt_zero(x):
x = asarray(x)
if any(isreal(x) & (x < 0)):
x = x * 1.0
return x
def _fix_real_abs_gt_1(x):
x = asarray(x)
if any(isreal(x) & (abs(x)>1)):
x = _tocomplex(x)
return x
def sqrt(x):
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
def log(x):
x = _fix_real_lt_zero(x)
return nx.log(x)
def log10(x):
x = _fix_real_lt_zero(x)
return nx.log10(x)
def logn(n, x):
""" Take log base n of x.
"""
x = _fix_real_lt_zero(x)
n = _fix_real_lt_zero(n)
return nx.log(x)/nx.log(n)
def log2(x):
""" Take log base 2 of x.
"""
x = _fix_real_lt_zero(x)
return nx.log(x)/_ln2
def power(x, p):
x = _fix_real_lt_zero(x)
p = _fix_int_lt_zero(p)
return nx.power(x, p)
def arccos(x):
x = _fix_real_abs_gt_1(x)
return nx.arccos(x)
def arcsin(x):
x = _fix_real_abs_gt_1(x)
return nx.arcsin(x)
def arctanh(x):
x = _fix_real_abs_gt_1(x)
return nx.arctanh(x)
| """
Wrapper functions to more user-friendly calling of certain math functions
whose output data-type is different than the input data-type in certain
domains of the input.
"""
__all__ = ['sqrt', 'log', 'log2', 'logn','log10', 'power', 'arccos',
'arcsin', 'arctanh']
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
from numpy.lib.type_check import isreal
#__all__.extend([key for key in dir(nx.umath)
# if key[0] != '_' and key not in __all__])
_ln2 = nx.log(2.0)
def _tocomplex(arr):
if isinstance(arr.dtype, (nt.single, nt.byte, nt.short, nt.ubyte,
nt.ushort)):
return arr.astype(nt.csingle)
else:
return arr.astype(nt.cdouble)
def _fix_real_lt_zero(x):
x = asarray(x)
if any(isreal(x) & (x<0)):
x = _tocomplex(x)
return x
def _fix_real_abs_gt_1(x):
x = asarray(x)
if any(isreal(x) & (abs(x)>1)):
x = _tocomplex(x)
return x
def sqrt(x):
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
def log(x):
x = _fix_real_lt_zero(x)
return nx.log(x)
def log10(x):
x = _fix_real_lt_zero(x)
return nx.log10(x)
def logn(n, x):
""" Take log base n of x.
"""
x = _fix_real_lt_zero(x)
n = _fix_real_lt_zero(n)
return nx.log(x)/nx.log(n)
def log2(x):
""" Take log base 2 of x.
"""
x = _fix_real_lt_zero(x)
return nx.log(x)/_ln2
def power(x, p):
x = _fix_real_lt_zero(x)
return nx.power(x, p)
def arccos(x):
x = _fix_real_abs_gt_1(x)
return nx.arccos(x)
def arcsin(x):
x = _fix_real_abs_gt_1(x)
return nx.arcsin(x)
def arctanh(x):
x = _fix_real_abs_gt_1(x)
return nx.arctanh(x)
| bsd-3-clause | Python |
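An illustrative usage sketch of the patched power above (assumes a NumPy build that includes this change; output values follow directly from the code as shown):

import numpy.lib.scimath as sm
# Negative integer exponents are promoted to float by _fix_int_lt_zero,
# so nx.power no longer receives a negative integer power for integer bases.
print(sm.power(2, -2))   # 0.25
# Negative real bases are still promoted to complex by _fix_real_lt_zero.
print(sm.power(-2, 2))   # (4+0j)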
3c299bf2682a9b8d5be2c9c8f308720182935d12 | Add missing username to log statement | lutris/website,lutris/website,lutris/website,lutris/website | accounts/tasks.py | accounts/tasks.py | import logging
from celery import task
from django.db import IntegrityError
from django.utils.text import slugify
import games.models
from accounts.models import User
from emails.messages import send_daily_mod_mail
from games.util.steam import create_game
LOGGER = logging.getLogger()
@task
def sync_steam_library(user_id):
user = User.objects.get(pk=user_id)
steamid = user.steamid
library = games.models.GameLibrary.objects.get(user=user)
steam_games = games.util.steam.steam_sync(steamid)
if not steam_games:
LOGGER.info("Steam user %s has no steam games", user.username)
return
for game in steam_games:
LOGGER.info("Adding %s to %s's library", game['name'], user.username)
if not game['img_icon_url']:
LOGGER.info("Game %s has no icon", game['name'])
continue
try:
steam_game = games.models.Game.objects.get(steamid=game['appid'])
except games.models.Game.MultipleObjectsReturned:
LOGGER.error("Multiple games with appid '%s'", game['appid'])
continue
except games.models.Game.DoesNotExist:
LOGGER.info("No game with steam id %s", game['appid'])
try:
steam_game = games.models.Game.objects.get(
slug=slugify(game['name'])[:50]
)
if not steam_game.steamid:
steam_game.steamid = game['appid']
steam_game.save()
except games.models.Game.DoesNotExist:
steam_game = create_game(game)
LOGGER.info("Creating game %s", steam_game.slug)
try:
library.games.add(steam_game)
except IntegrityError:
# Game somehow already added.
pass
@task
def daily_mod_mail():
send_daily_mod_mail()
| import logging
from celery import task
from django.db import IntegrityError
from django.utils.text import slugify
import games.models
from accounts.models import User
from emails.messages import send_daily_mod_mail
from games.util.steam import create_game
LOGGER = logging.getLogger()
@task
def sync_steam_library(user_id):
user = User.objects.get(pk=user_id)
steamid = user.steamid
library = games.models.GameLibrary.objects.get(user=user)
steam_games = games.util.steam.steam_sync(steamid)
if not steam_games:
LOGGER.info("Steam user %s has no steam games")
return
for game in steam_games:
LOGGER.info("Adding %s to %s's library", game['name'], user.username)
if not game['img_icon_url']:
LOGGER.info("Game %s has no icon", game['name'])
continue
try:
steam_game = games.models.Game.objects.get(steamid=game['appid'])
except games.models.Game.MultipleObjectsReturned:
LOGGER.error("Multiple games with appid '%s'", game['appid'])
continue
except games.models.Game.DoesNotExist:
LOGGER.info("No game with steam id %s", game['appid'])
try:
steam_game = games.models.Game.objects.get(
slug=slugify(game['name'])[:50]
)
if not steam_game.steamid:
steam_game.steamid = game['appid']
steam_game.save()
except games.models.Game.DoesNotExist:
steam_game = create_game(game)
LOGGER.info("Creating game %s", steam_game.slug)
try:
library.games.add(steam_game)
except IntegrityError:
# Game somehow already added.
pass
@task
def daily_mod_mail():
send_daily_mod_mail()
| agpl-3.0 | Python |
3b6d5fd80eb4d95679b969e8809b154d6254de8d | Replace get_user_profile_by_email with get_user. | jrowan/zulip,Galexrt/zulip,punchagan/zulip,amanharitsh123/zulip,rishig/zulip,synicalsyntax/zulip,vaidap/zulip,timabbott/zulip,tommyip/zulip,rishig/zulip,shubhamdhama/zulip,punchagan/zulip,mahim97/zulip,tommyip/zulip,hackerkid/zulip,dhcrzf/zulip,brainwane/zulip,jackrzhang/zulip,tommyip/zulip,rht/zulip,dhcrzf/zulip,mahim97/zulip,dhcrzf/zulip,Galexrt/zulip,synicalsyntax/zulip,andersk/zulip,showell/zulip,brainwane/zulip,punchagan/zulip,tommyip/zulip,punchagan/zulip,andersk/zulip,rht/zulip,vabs22/zulip,jackrzhang/zulip,rht/zulip,punchagan/zulip,zulip/zulip,tommyip/zulip,jrowan/zulip,timabbott/zulip,showell/zulip,timabbott/zulip,brainwane/zulip,dhcrzf/zulip,eeshangarg/zulip,zulip/zulip,Galexrt/zulip,jackrzhang/zulip,andersk/zulip,rishig/zulip,hackerkid/zulip,hackerkid/zulip,Galexrt/zulip,kou/zulip,brainwane/zulip,kou/zulip,verma-varsha/zulip,timabbott/zulip,zulip/zulip,rishig/zulip,vabs22/zulip,showell/zulip,rishig/zulip,rht/zulip,andersk/zulip,amanharitsh123/zulip,eeshangarg/zulip,timabbott/zulip,jrowan/zulip,brainwane/zulip,rishig/zulip,verma-varsha/zulip,dhcrzf/zulip,vabs22/zulip,amanharitsh123/zulip,mahim97/zulip,tommyip/zulip,zulip/zulip,amanharitsh123/zulip,punchagan/zulip,amanharitsh123/zulip,rht/zulip,Galexrt/zulip,brockwhittaker/zulip,zulip/zulip,eeshangarg/zulip,kou/zulip,vabs22/zulip,amanharitsh123/zulip,brockwhittaker/zulip,synicalsyntax/zulip,jrowan/zulip,jackrzhang/zulip,eeshangarg/zulip,eeshangarg/zulip,shubhamdhama/zulip,rht/zulip,vaidap/zulip,jackrzhang/zulip,showell/zulip,vabs22/zulip,vabs22/zulip,brockwhittaker/zulip,rishig/zulip,synicalsyntax/zulip,rht/zulip,shubhamdhama/zulip,jackrzhang/zulip,vaidap/zulip,verma-varsha/zulip,showell/zulip,shubhamdhama/zulip,brainwane/zulip,dhcrzf/zulip,jrowan/zulip,jackrzhang/zulip,shubhamdhama/zulip,verma-varsha/zulip,kou/zulip,showell/zulip,brockwhittaker/zulip,zulip/zulip,brockwhittaker/zulip,dhcrzf/zulip,synicalsyntax/zulip,brainwane/zulip,Galexrt/zulip,showell/zulip,tommyip/zulip,kou/zulip,hackerkid/zulip,verma-varsha/zulip,zulip/zulip,mahim97/zulip,brockwhittaker/zulip,hackerkid/zulip,shubhamdhama/zulip,mahim97/zulip,kou/zulip,punchagan/zulip,synicalsyntax/zulip,hackerkid/zulip,andersk/zulip,kou/zulip,vaidap/zulip,vaidap/zulip,timabbott/zulip,mahim97/zulip,verma-varsha/zulip,synicalsyntax/zulip,eeshangarg/zulip,eeshangarg/zulip,vaidap/zulip,Galexrt/zulip,jrowan/zulip,timabbott/zulip,shubhamdhama/zulip,andersk/zulip,andersk/zulip,hackerkid/zulip | zerver/management/commands/bankrupt_users.py | zerver/management/commands/bankrupt_users.py | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import CommandError
from zerver.lib.actions import do_update_message_flags
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Message
class Command(ZulipBaseCommand):
help = """Bankrupt one or many users."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('emails', metavar='<email>', type=str, nargs='+',
help='email address to bankrupt')
self.add_realm_args(parser, True)
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = self.get_realm(options)
for email in options['emails']:
try:
user_profile = self.get_user(email, realm)
except CommandError:
print("e-mail %s doesn't exist in the realm %s, skipping" % (email, realm))
continue
do_update_message_flags(user_profile, "add", "read", None, True, None, None)
messages = Message.objects.filter(
usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
old_pointer = user_profile.pointer
new_pointer = messages[0].id
user_profile.pointer = new_pointer
user_profile.save(update_fields=["pointer"])
print("%s: %d => %d" % (email, old_pointer, new_pointer))
else:
print("%s has no messages, can't bankrupt!" % (email,))
| from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_update_message_flags
from zerver.models import UserProfile, Message, get_user_profile_by_email
class Command(BaseCommand):
help = """Bankrupt one or many users."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('emails', metavar='<email>', type=str, nargs='+',
help='email address to bankrupt')
def handle(self, *args, **options):
# type: (*Any, **str) -> None
for email in options['emails']:
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
print("e-mail %s doesn't exist in the system, skipping" % (email,))
continue
do_update_message_flags(user_profile, "add", "read", None, True, None, None)
messages = Message.objects.filter(
usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
old_pointer = user_profile.pointer
new_pointer = messages[0].id
user_profile.pointer = new_pointer
user_profile.save(update_fields=["pointer"])
print("%s: %d => %d" % (email, old_pointer, new_pointer))
else:
print("%s has no messages, can't bankrupt!" % (email,))
| apache-2.0 | Python |
3135bda8970a2fdefa92b932c15cf5c559392c9c | allow to specify db session callable directly | ergo/ziggurat_foundations,ergo/ziggurat_foundations | ziggurat_foundations/ext/pyramid/get_user.py | ziggurat_foundations/ext/pyramid/get_user.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
import logging
from ziggurat_foundations.models.base import get_db_session
from ziggurat_foundations.models.services.user import UserService
CONFIG_KEY = "ziggurat_foundations"
log = logging.getLogger(__name__)
def includeme(config):
settings = config.registry.settings
session_provider_callable_config = settings.get(
"%s.session_provider_callable" % CONFIG_KEY
)
if not session_provider_callable_config:
def session_provider_callable(request):
return get_db_session()
test_session_callable = None
else:
if callable(session_provider_callable_config):
session_provider_callable = session_provider_callable_config
else:
parts = session_provider_callable_config.split(":")
_tmp = importlib.import_module(parts[0])
session_provider_callable = getattr(_tmp, parts[1])
test_session_callable = "session exists"
# This function is bundled into the request, so for each request you can
# do request.user
def get_user(request):
userid = request.unauthenticated_userid
if test_session_callable is None:
# set db_session to none to pass to the UserModel.by_id
db_session = None
else:
# Else assign the request.session
db_session = session_provider_callable(request)
if userid is not None:
return UserService.by_id(userid, db_session=db_session)
# add in request.user function
config.add_request_method(get_user, "user", reify=True, property=True)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
import logging
from ziggurat_foundations.models.base import get_db_session
from ziggurat_foundations.models.services.user import UserService
CONFIG_KEY = "ziggurat_foundations"
log = logging.getLogger(__name__)
def includeme(config):
settings = config.registry.settings
session_provider_callable_config = settings.get(
"%s.session_provider_callable" % CONFIG_KEY
)
if not session_provider_callable_config:
def session_provider_callable(request):
return get_db_session()
test_session_callable = None
else:
parts = session_provider_callable_config.split(":")
_tmp = importlib.import_module(parts[0])
session_provider_callable = getattr(_tmp, parts[1])
test_session_callable = "session exists"
# This function is bundled into the request, so for each request you can
# do request.user
def get_user(request):
userid = request.unauthenticated_userid
if test_session_callable is None:
# set db_session to none to pass to the UserModel.by_id
db_session = None
else:
# Else assign the request.session
db_session = session_provider_callable(request)
if userid is not None:
return UserService.by_id(userid, db_session=db_session)
# add in request.user function
config.add_request_method(get_user, "user", reify=True, property=True)
| bsd-3-clause | Python |
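A configuration sketch of the two forms the setting now accepts (myapp.db and get_session are hypothetical names; per the code above, the dotted string is split on ':' and imported, while anything callable is used as-is):

# In the Pyramid settings, either the dotted-path string form...
settings['ziggurat_foundations.session_provider_callable'] = 'myapp.db:get_session'
# ...or, new with this change, the callable itself:
settings['ziggurat_foundations.session_provider_callable'] = get_session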
07def114287bc3488e76e2516ca7682954ba4a09 | Use default alphabet | odtvince/APITaxi,l-vincent-l/APITaxi,openmaraude/APITaxi,odtvince/APITaxi,openmaraude/APITaxi,odtvince/APITaxi,odtvince/APITaxi,l-vincent-l/APITaxi | APITaxi/extensions.py | APITaxi/extensions.py | #coding: utf-8
from flask_sqlalchemy import SQLAlchemy as BaseSQLAlchemy
from sqlalchemy.pool import QueuePool as BaseQueuePool
class SQLAlchemy(BaseSQLAlchemy):
def apply_driver_hacks(self, app, info, options):
BaseSQLAlchemy.apply_driver_hacks(self, app, info, options)
class QueuePool(BaseQueuePool):
def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30, **kw):
kw['use_threadlocal'] = True
BaseQueuePool.__init__(self, creator, pool_size, max_overflow, timeout, **kw)
options.setdefault('poolclass', QueuePool)
db = SQLAlchemy(session_options={"autoflush":False})
from .utils.redis_geo import GeoRedis
from flask.ext.redis import FlaskRedis
redis_store = FlaskRedis.from_custom_provider(GeoRedis)
from flask.ext.celery import Celery
celery = Celery()
from dogpile.cache import make_region
region_taxi = make_region('taxis')
region_hails = make_region('hails')
region_zupc = make_region('zupc')
def user_key_generator(namespace, fn, **kw):
def generate_key(*args, **kwargs):
return fn.__name__ +\
"_".join(str(s) for s in args) +\
"_".join(k+"_"+str(v) for k,v in kwargs.iteritems())
return generate_key
region_users = make_region('users', function_key_generator=user_key_generator)
from flask.ext.uploads import (UploadSet, configure_uploads,
DOCUMENTS, DATA, ARCHIVES, IMAGES)
documents = UploadSet('documents', DOCUMENTS + DATA + ARCHIVES)
images = UploadSet('images', IMAGES)
from .index_zupc import IndexZUPC
index_zupc = IndexZUPC()
from .utils.cache_user_datastore import CacheUserDatastore
from .models import security
user_datastore = CacheUserDatastore(db, security.User,
security.Role)
import shortuuid
suid = shortuuid.ShortUUID()
def get_short_uuid():
return suid.uuid()[:7]
| #coding: utf-8
from flask_sqlalchemy import SQLAlchemy as BaseSQLAlchemy
from sqlalchemy.pool import QueuePool as BaseQueuePool
class SQLAlchemy(BaseSQLAlchemy):
def apply_driver_hacks(self, app, info, options):
BaseSQLAlchemy.apply_driver_hacks(self, app, info, options)
class QueuePool(BaseQueuePool):
def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30, **kw):
kw['use_threadlocal'] = True
BaseQueuePool.__init__(self, creator, pool_size, max_overflow, timeout, **kw)
options.setdefault('poolclass', QueuePool)
db = SQLAlchemy(session_options={"autoflush":False})
from .utils.redis_geo import GeoRedis
from flask.ext.redis import FlaskRedis
redis_store = FlaskRedis.from_custom_provider(GeoRedis)
from flask.ext.celery import Celery
celery = Celery()
from dogpile.cache import make_region
region_taxi = make_region('taxis')
region_hails = make_region('hails')
region_zupc = make_region('zupc')
def user_key_generator(namespace, fn, **kw):
def generate_key(*args, **kwargs):
return fn.__name__ +\
"_".join(str(s) for s in args) +\
"_".join(k+"_"+str(v) for k,v in kwargs.iteritems())
return generate_key
region_users = make_region('users', function_key_generator=user_key_generator)
from flask.ext.uploads import (UploadSet, configure_uploads,
DOCUMENTS, DATA, ARCHIVES, IMAGES)
documents = UploadSet('documents', DOCUMENTS + DATA + ARCHIVES)
images = UploadSet('images', IMAGES)
from .index_zupc import IndexZUPC
index_zupc = IndexZUPC()
from .utils.cache_user_datastore import CacheUserDatastore
from .models import security
user_datastore = CacheUserDatastore(db, security.User,
security.Role)
import shortuuid
suid = shortuuid.ShortUUID(alphabet=
'0123456789abcdefghijklmnopqrstuvwxyzABDEFGHIJKLOMNOPQRSTUVWXYZ')
def get_short_uuid():
return suid.uuid()[:7]
| agpl-3.0 | Python |
9f500668555292add5d87c942e0cd804aefa6df2 | Replace cat usage for fgrep | stackforge/fuel-ostf,eayunstack/fuel-ostf,stackforge/fuel-ostf,eayunstack/fuel-ostf | fuel_health/tests/cloudvalidation/test_disk_space_db.py | fuel_health/tests/cloudvalidation/test_disk_space_db.py | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuel_health import cloudvalidation
class DBSpaceTest(cloudvalidation.CloudValidationTest):
"""Cloud Validation Test class for free space for DB."""
def _check_db_disk_expectation_warning(self, host):
"""Checks whether DB expects less free space than actually
is presented on the controller node
"""
scheduler_log = 'nova-scheduler.log'
if self.config.compute.deployment_os.lower() == 'centos':
scheduler_log = 'scheduler.log'
err_msg = "Cannot check {scheduler_log} at {host}".format(
host=host, scheduler_log=scheduler_log)
warning_msg = "Host has more disk space than database expected"
cmd = "fgrep '{msg}' -q /var/log/nova/{scheduler_log}".format(
msg=warning_msg, scheduler_log=scheduler_log)
out, err = self.verify(5, self._run_ssh_cmd, 1, err_msg,
'check nova-scheduler.log', host, cmd)
self.verify_response_true(not err, err_msg, 1)
return out
def test_db_expectation_free_space(self):
"""Check disk space allocation for databases on controller nodes
Target component: Nova
Scenario:
1. Check disk space allocation for databases on controller nodes
Duration: 20 s.
Deployment tags: disabled
Available since release: 2014.2-6.1
"""
hosts = filter(self._check_db_disk_expectation_warning,
self.controllers)
self.verify_response_true(not hosts,
("Free disk space cannot be used "
"by database on node(s): {hosts}"
).format(hosts=hosts),
1)
| # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuel_health import cloudvalidation
class DBSpaceTest(cloudvalidation.CloudValidationTest):
"""Cloud Validation Test class for free space for DB."""
def _check_db_disk_expectation_warning(self, host):
"""Checks whether DB expects less free space than actually
is presented on the controller node
"""
scheduler_log = 'nova-scheduler.log'
if self.config.compute.deployment_os.lower() == 'centos':
scheduler_log = 'scheduler.log'
err_msg = "Cannot check {scheduler_log} at {host}".format(
host=host, scheduler_log=scheduler_log)
warning_msg = "Host has more disk space than database expected"
cmd = ("cat /var/log/nova/{scheduler_log} "
"| grep '{msg}' | tail -1").format(
msg=warning_msg, scheduler_log=scheduler_log)
out, err = self.verify(5, self._run_ssh_cmd, 1, err_msg,
'check nova-scheduler.log', host, cmd)
self.verify_response_true(not err, err_msg, 1)
return out
def test_db_expectation_free_space(self):
"""Check disk space allocation for databases on controller nodes
Target component: Nova
Scenario:
1. Check disk space allocation for databases on controller nodes
Duration: 20 s.
Deployment tags: disabled
Available since release: 2014.2-6.1
"""
hosts = filter(self._check_db_disk_expectation_warning,
self.controllers)
self.verify_response_true(not hosts,
("Free disk space cannot be used "
"by database on node(s): {hosts}"
).format(hosts=hosts),
1)
| apache-2.0 | Python |
2284f9f944ef72c7e2f6c9a4e93e395b09196719 | modify initial config | fatrix/django-golive,fatrix/django-golive | golive/management/commands/create_config.py | golive/management/commands/create_config.py | from django.core.management import BaseCommand
from fabric.state import output
import sys
from golive.stacks.stack import StackFactory, Stack
import yaml
class Command(BaseCommand):
help = 'Creates a basic exampe configuration file'
output['stdout'] = False
example = """CONFIG:
PLATFORM: DEDICATED
STACK: CLASSIC
ENVIRONMENTS:
DEFAULTS:
INIT_USER: root
PROJECT_NAME: djangoproject
PUBKEY: $HOME/.ssh/id_dsa.pub
TESTING:
SERVERNAME: testserver
ROLES:
APP_HOST:
- testserver
DB_HOST:
- testserver
WEB_HOST:
- testserver"""
def handle(self, *args, **options):
example_file = open(Stack.CONFIG, 'w')
example_file.write(Command.example)
example_file.close()
def end(self):
self.stdout.write('Done\n')
| from django.core.management import BaseCommand
from fabric.state import output
import sys
from golive.stacks.stack import StackFactory, Stack
import yaml
class Command(BaseCommand):
help = 'Creates a basic exampe configuration file'
output['stdout'] = False
example = """CONFIG:
PLATFORM: DEDICATED
STACK: CLASSIC
ENVIRONMENTS:
DEFAULTS:
INIT_USER: fatrix
PROJECT_NAME: django_example
PUBKEY: $HOME/user.pub
TESTING:
SERVERNAME: golive-sandbox1
ROLES:
APP_HOST:
- testbox1
DB_HOST:
- testbox1
WEB_HOST:
- testbox1"""
def handle(self, *args, **options):
example_file = open(Stack.CONFIG, 'w')
example_file.write(Command.example)
example_file.close()
def end(self):
self.stdout.write('Done\n')
| bsd-2-clause | Python |
bc85dffa594c292094d2aa1f5a456e0a0690ea79 | Remove debug code | pombredanne/grumpy,pombredanne/grumpy | grumpy-tools-src/tests/test_grumpy_tools.py | grumpy-tools-src/tests/test_grumpy_tools.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `grumpy_tools` package."""
import tempfile
import unittest
import pytest
from click.testing import CliRunner
from grumpy_tools import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
@pytest.mark.xfail
def test_command_line_interface(capfd):
"""Test the CLI."""
runner = CliRunner()
out, err = capfd.readouterr()
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert '>>> ' in out, (result.output, out, err)
def test_run_input_inline(capfd):
runner = CliRunner()
result = runner.invoke(cli.main, ['run', '-c', "print('Hello World')",])
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
def test_run_input_stdin(capfd):
runner = CliRunner()
result = runner.invoke(cli.main, ['run'], input="print('Hello World')")
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
def test_run_input_file(capfd):
runner = CliRunner()
with tempfile.NamedTemporaryFile() as script_file:
script_file.write("print('Hello World')")
script_file.flush()
result = runner.invoke(cli.main, ['run', script_file.name])
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `grumpy_tools` package."""
import tempfile
import unittest
import pytest
from click.testing import CliRunner
from grumpy_tools import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
@pytest.mark.xfail
def test_command_line_interface(capfd):
"""Test the CLI."""
runner = CliRunner()
out, err = capfd.readouterr()
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert '>>> ' in out, (result.output, out, err)
def test_run_input_inline(capfd):
runner = CliRunner()
result = runner.invoke(cli.main, ['run', '-c', "print('Hello World')",])
# import wdb; wdb.set_trace()
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
def test_run_input_stdin(capfd):
runner = CliRunner()
result = runner.invoke(cli.main, ['run'], input="print('Hello World')")
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
def test_run_input_file(capfd):
runner = CliRunner()
with tempfile.NamedTemporaryFile() as script_file:
script_file.write("print('Hello World')")
script_file.flush()
result = runner.invoke(cli.main, ['run', script_file.name])
out, err = capfd.readouterr()
assert out == 'Hello World\n', (err, result.output)
assert result.exit_code == 0
| apache-2.0 | Python |
f42fdde5404c3025236ad7dcade4b08529e7ce36 | repair Noneuser_bug | ifwenvlook/blog,ifwenvlook/blog,ifwenvlook/blog,ifwenvlook/blog | app/delete.py | app/delete.py | from .models import User
from . import db
def deletenone():
noneuser=User.query.filter_by(username=None).all()
for user in noneuser:
db.session.delete(user)
db.session.commit()
| from .models import User
from . import db
def deletenone():
noneuser=User.query.filter_by(username=None).all()
for user in noneuser:
db.session.delete(user)
db.session.commit()
| mit | Python |
9ac03fa54f0134905033f615f6e02804f704b1a0 | Add User and Items | Alweezy/cp2-bucketlist-api,Alweezy/cp2-bucketlist-api,Alweezy/cp2-bucketlist-api | app/models.py | app/models.py | from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
class User(UserMixin, db.Model):
"""This class represents the user table."""
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255), nullable=False, unique=True)
email = db.Column(db.String(256), nullable=False, unique=True)
user_password = db.Column(db.String(255), nullable=False)
bucketlists = db.relationship('BucketList', order_by="BucketList.id",
cascade="all,delete-orphan")
def __init__(self, username, password, email):
self.username = username
self.password = password
self.email = email
@property
def password(self):
raise AttributeError('You cannot access password')
@password.setter
def password(self):
self.user_password = generate_password_hash(self.password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
class Bucketlist(db.Model):
"""This class represents the bucketlist table."""
__tablename__ = 'bucketlists'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
def __init__(self, name):
"""initialize with name."""
self.name = name
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
return Bucketlist.query.all()
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return "<Bucketlist: {}>".format(self.name)
class BucketListItem(db.Model):
"""This class represents the bucketlist_item table"""
__tablename__ = 'bucketlistitems'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
done = db.Column(db.Boolean, default=False)
bucketlist_id = db.Column(db.Integer, db.ForeignKey(Bucketlist.id))
def __init__(self, name, bucketlist_id):
"""Initialize with name and bucketlist_id"""
self.name = name
self.bucketlist_id = bucketlist_id
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all_items():
return BucketListItem.query.filter_by(bucketlist_id=Bucketlist.id)
def delete(self):
db.session.delete(self)
db.session.commit()
| from app import db
class Bucketlist(db.Model):
"""This class represents the bucketlist table."""
__tablename__ = 'bucketlists'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
def __init__(self, name):
"""initialize with name."""
self.name = name
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
return Bucketlist.query.all()
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return "<Bucketlist: {}>".format(self.name)
| mit | Python |
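Note that in the new contents above the password setter takes no value argument and reads the getter that raises AttributeError, and verify_password references a password_hash attribute the model never defines; a conventional Werkzeug pattern would look like the following sketch (not part of the record):

@password.setter
def password(self, password):
    # hash the incoming value, store it on the real column
    self.user_password = generate_password_hash(password)

def verify_password(self, password):
    # compare against the stored hash column
    return check_password_hash(self.user_password, password)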
06d71ede1c1feaa597b442f4ead63d2b2e31e715 | fix `trigger` -> `__call__` | chainer/chainer,keisuke-umezawa/chainer,niboshi/chainer,tkerola/chainer,niboshi/chainer,keisuke-umezawa/chainer,wkentaro/chainer,chainer/chainer,wkentaro/chainer,keisuke-umezawa/chainer,hvy/chainer,hvy/chainer,hvy/chainer,okuta/chainer,hvy/chainer,chainer/chainer,okuta/chainer,pfnet/chainer,wkentaro/chainer,niboshi/chainer,okuta/chainer,chainer/chainer,keisuke-umezawa/chainer,wkentaro/chainer,niboshi/chainer,okuta/chainer | chainer/training/triggers/once_trigger.py | chainer/training/triggers/once_trigger.py | class OnceTrigger(object):
"""Trigger based on the starting point of the iteration.
This trigger accepts only once at starting point of the iteration. There
are two ways to specify the starting point: only starting point in whole
iteration or called again when training resumed.
Args:
call_on_resume (bool): Whether the extension is called again or not
when restored from a snapshot. It is set to ``False`` by default.
"""
def __init__(self, call_on_resume=False):
self._flag_first = True
self._flag_resumed = call_on_resume
def __call__(self, trainer):
flag = self._flag_first or self._flag_resumed
self._flag_resumed = False
self._flag_first = False
return flag
def serialize(self, serializer):
self._flag_first = serializer('_flag_first', self._flag_first)
| class OnceTrigger(object):
"""Trigger based on the starting point of the iteration.
This trigger accepts only once at starting point of the iteration. There
are two ways to specify the starting point: only starting point in whole
iteration or called again when training resumed.
Args:
call_on_resume (bool): Whether the extension is called again or not
when restored from a snapshot. It is set to ``False`` by default.
"""
def __init__(self, call_on_resume=False):
self._flag_first = True
self._flag_resumed = call_on_resume
def trigger(self, trainer):
flag = self._flag_first or self._flag_resumed
self._flag_resumed = False
self._flag_first = False
return flag
def serialize(self, serializer):
self._flag_first = serializer('_flag_first', self._flag_first)
| mit | Python |
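A minimal sketch of the renamed protocol (assumes a Chainer version that ships this module; the trainer argument is unused by this trigger, so None stands in here):

from chainer.training.triggers.once_trigger import OnceTrigger

t = OnceTrigger(call_on_resume=False)
print(t(None))  # True  -- fires once on the first check
print(t(None))  # False -- and never again for this instance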
ddec6067054cc4408ac174e3ea4ffeca2a962201 | Remove unnecessary assert from view for Notice home. | 18F/regulations-site,18F/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,tadhg-ohiggins/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,eregs/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,18F/regulations-site,18F/regulations-site | regulations/views/notice_home.py | regulations/views/notice_home.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from operator import itemgetter
import logging
from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View
from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
notice_data, CommentState)
logger = logging.getLogger(__name__)
class NoticeHomeView(View):
"""
Basic view that provides a list of regulations and notices to the context.
"""
template_name = None # We should probably have a default notice template.
def get(self, request, *args, **kwargs):
notices = ApiReader().notices().get("results", [])
context = {}
notices_meta = []
for notice in notices:
try:
if notice.get("document_number"):
_, meta, _ = notice_data(notice["document_number"])
notices_meta.append(meta)
except Http404:
pass
notices_meta = sorted(notices_meta, key=itemgetter("publication_date"),
reverse=True)
context["notices"] = notices_meta
# Django templates won't show contents of CommentState as an Enum, so:
context["comment_state"] = {state.name: state.value for state in
CommentState}
template = self.template_name
return TemplateResponse(request=request, template=template,
context=context)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from operator import itemgetter
import logging
from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View
from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
notice_data, CommentState)
logger = logging.getLogger(__name__)
class NoticeHomeView(View):
"""
Basic view that provides a list of regulations and notices to the context.
"""
template_name = None # We should probably have a default notice template.
def get(self, request, *args, **kwargs):
notices = ApiReader().notices().get("results", [])
context = {}
notices_meta = []
for notice in notices:
try:
if notice.get("document_number"):
_, meta, _ = notice_data(notice["document_number"])
notices_meta.append(meta)
except Http404:
pass
notices_meta = sorted(notices_meta, key=itemgetter("publication_date"),
reverse=True)
context["notices"] = notices_meta
# Django templates won't show contents of CommentState as an Enum, so:
context["comment_state"] = {state.name: state.value for state in
CommentState}
assert self.template_name
template = self.template_name
return TemplateResponse(request=request, template=template,
context=context)
| cc0-1.0 | Python |
91946410f14b21e510a104b105a6f5036cc8944f | build updated | CountZer0/PipelineConstructionSet,CountZer0/PipelineConstructionSet,CountZer0/PipelineConstructionSet | python/common/core/globalVariables.py | python/common/core/globalVariables.py | '''
Author: Jason Parks
Created: Apr 22, 2012
Module: common.core.globalVariables
Purpose: to import globalVariables
'''
# Location of Toolset
toolsLocation = 'C:/Users/jason/git/PipelineConstructionSet'
# NOTE!: It is necessary to manually add the above location's
# python directory, i.e-
#
# PYTHONPATH = 'C:/Users/jason/git/PipelineConstructionSet/python'
#
# to the PYTHONPATH environment variable on all user's
# machines whom want to use Pipeline Construction set
# Location of setup schema data file
schemaLocation = 'C:/Users/jason/remotePCS'
# "schemaLocation" should probably be in a shared folder on the network
# so all users can get updates the T.A. makes to the file called
# pcsSchema.xml in this location. You can find a sample version of
# this file in ./PipelineConstructionSet/schemas/pcsSchemaSample.xml
# Name your games here:
teamA = 'GreatGameA'
teamB = 'GreatGameB'
teamC = 'GreatGameC'
teamD = 'GreatGameD'
# You need to change the name of the file
# ./PipelineConstructionSet/schemas/GreatGameA.xml
# and the xml header info in the file as well
# If you are making tools for more than one team,
# you'll need to make more GreatGame*.xml files
# manually update to date/time
build = '050612-21.01'
# This will show up in the PythonEditor or ScriptEditor
# when our DCC app first launches the toolMenu.
print "common.core.globalVariables imported" | '''
Author: Jason Parks
Created: Apr 22, 2012
Module: common.core.globalVariables
Purpose: to import globalVariables
'''
# Location of Toolset
toolsLocation = 'C:/Users/jason/git/PipelineConstructionSet'
# NOTE!: It is necessary to manually add the above location's
# python directory, i.e-
#
# PYTHONPATH = 'C:/Users/jason/git/PipelineConstructionSet/python'
#
# to the PYTHONPATH environment variable on all user's
# machines whom want to use Pipeline Construction set
# Location of setup schema data file
schemaLocation = 'C:/Users/jason/remotePCS'
# "schemaLocation" should probably be in a shared folder on the network
# so all users can get updates the T.A. makes to the file called
# pcsSchema.xml in this location. You can find a sample version of
# this file in ./PipelineConstructionSet/schemas/pcsSchemaSample.xml
# Name your games here:
teamA = 'GreatGameA'
teamB = 'GreatGameB'
teamC = 'GreatGameC'
teamD = 'GreatGameD'
# You need to change the name of the file
# ./PipelineConstructionSet/schemas/GreatGameA.xml
# and the xml header info in the file as well
# If you are making tools for more than one team,
# you'll need to make more GreatGame*.xml files
# manually update to date/time
build = '042212-20.27'
# This will show up in the PythonEditor or ScriptEditor
# when our DCC app first launches the toolMenu.
print "common.core.globalVariables imported" | bsd-3-clause | Python |
c573263511bcbf0ffe37f538142aedd9064f8ae0 | Remove copying devdata.env as it's only used for the Google API key we've removed | hypothesis/via,hypothesis/via,hypothesis/via | bin/devdata.py | bin/devdata.py | """Download .devdata from github.com:hypothesis/devdata.git."""
import os
from pathlib import Path
from shutil import copyfile
from subprocess import check_call
from tempfile import TemporaryDirectory
def _get_devdata():
# The directory that we'll clone the devdata git repo into.
with TemporaryDirectory() as tmp_dir_name:
git_dir = os.path.join(tmp_dir_name, "devdata")
check_call(["git", "clone", "[email protected]:hypothesis/devdata.git", git_dir])
# Copy devdata env file into place.
for source, target in (
(
"via/devdata/google_drive_credentials.json",
".devdata/google_drive_credentials.json",
),
(
"via/devdata/google_drive_resource_keys.json",
".devdata/google_drive_resource_keys.json",
),
):
copyfile(
os.path.join(git_dir, source),
os.path.join(Path(__file__).parent.parent, target),
)
if __name__ == "__main__":
_get_devdata()
| """Download .devdata.env from github.com:hypothesis/devdata.git."""
import os
from pathlib import Path
from shutil import copyfile
from subprocess import check_call
from tempfile import TemporaryDirectory
def _get_devdata():
# The directory that we'll clone the devdata git repo into.
with TemporaryDirectory() as tmp_dir_name:
git_dir = os.path.join(tmp_dir_name, "devdata")
check_call(["git", "clone", "[email protected]:hypothesis/devdata.git", git_dir])
# Copy devdata env file into place.
for source, target in (
("via/devdata.env", ".devdata.env"),
(
"via/devdata/google_drive_credentials.json",
".devdata/google_drive_credentials.json",
),
(
"via/devdata/google_drive_resource_keys.json",
".devdata/google_drive_resource_keys.json",
),
):
copyfile(
os.path.join(git_dir, source),
os.path.join(Path(__file__).parent.parent, target),
)
if __name__ == "__main__":
_get_devdata()
| bsd-2-clause | Python |
81b5961cdf4b9ca7e20920eda3c7f76f96a35a9b | Bump version | pbs/django-filer,pbs/django-filer,pbs/django-filer,pbs/django-filer,pbs/django-filer | filer/__init__.py | filer/__init__.py | #-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.105' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| #-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.105.dev1' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| bsd-3-clause | Python |
eef9d75a7d019a397d2026612ece76d217747e5b | mark oddity | ZeitOnline/zeit.edit,ZeitOnline/zeit.edit,ZeitOnline/zeit.edit | src/zeit/edit/browser/tests/test_form.py | src/zeit/edit/browser/tests/test_form.py | # Copyright (c) 2012 gocept gmbh & co. kg
# See also LICENSE.txt
from mock import Mock
import zeit.cms.testing
import zeit.edit.browser.form
import zope.formlib.form
import zope.interface
import zope.publisher.browser
import zope.schema
class IExample(zope.interface.Interface):
foo = zope.schema.TextLine(title=u'foo')
class InlineForm(zeit.cms.testing.FunctionalTestCase):
# XXX This test should be moved to zeit.cms.browser, but it seems nearly
# impossible to instantiate an EditForm, so we punt on this for now;
# InlineForms are friendlier (since they don't pull in the
# main_template.pt)
def render_form(self, form_class):
ANY_CONTEXT = Mock()
zope.interface.alsoProvides(ANY_CONTEXT, IExample)
request = zope.publisher.browser.TestRequest()
form = form_class(ANY_CONTEXT, request)
return form()
def test_css_class_on_widget_is_rendered_to_html(self):
class ExampleForm(zeit.edit.browser.form.InlineForm):
form_fields = zope.formlib.form.FormFields(IExample)
legend = 'Legend'
def setUpWidgets(self):
super(ExampleForm, self).setUpWidgets()
self.widgets['foo'].vivi_css_class = 'barbaz qux'
self.assertEllipsis("""\
...<div class="field fieldname-foo required barbaz qux">
<div class="label">...""", self.render_form(ExampleForm))
def test_widget_without_css_class_does_not_break(self):
class ExampleForm(zeit.edit.browser.form.InlineForm):
form_fields = zope.formlib.form.FormFields(IExample)
legend = 'Legend'
self.assertEllipsis("""\
...<div class="field fieldname-foo required">
<div class="label">...""", self.render_form(ExampleForm))
| # Copyright (c) 2012 gocept gmbh & co. kg
# See also LICENSE.txt
from mock import Mock
import zeit.cms.testing
import zeit.edit.browser.form
import zope.formlib.form
import zope.interface
import zope.publisher.browser
import zope.schema
class IExample(zope.interface.Interface):
foo = zope.schema.TextLine(title=u'foo')
class InlineForm(zeit.cms.testing.FunctionalTestCase):
def render_form(self, form_class):
ANY_CONTEXT = Mock()
zope.interface.alsoProvides(ANY_CONTEXT, IExample)
request = zope.publisher.browser.TestRequest()
form = form_class(ANY_CONTEXT, request)
return form()
def test_css_class_on_widget_is_rendered_to_html(self):
class ExampleForm(zeit.edit.browser.form.InlineForm):
form_fields = zope.formlib.form.FormFields(IExample)
legend = 'Legend'
def setUpWidgets(self):
super(ExampleForm, self).setUpWidgets()
self.widgets['foo'].vivi_css_class = 'barbaz qux'
self.assertEllipsis("""\
...<div class="field fieldname-foo required barbaz qux">
<div class="label">...""", self.render_form(ExampleForm))
def test_widget_without_css_class_does_not_break(self):
class ExampleForm(zeit.edit.browser.form.InlineForm):
form_fields = zope.formlib.form.FormFields(IExample)
legend = 'Legend'
self.assertEllipsis("""\
...<div class="field fieldname-foo required">
<div class="label">...""", self.render_form(ExampleForm))
| bsd-3-clause | Python |
22c727e0e38953f3647a8a825b01fcf142c06c64 | Bump version. | armet/python-armet | armet/_version.py | armet/_version.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division
__version_info__ = (0, 4, 17)
__version__ = '.'.join(map(str, __version_info__))
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division
__version_info__ = (0, 4, 16)
__version__ = '.'.join(map(str, __version_info__))
| mit | Python |
7a1ddf38db725f0696482a271c32fa297d629316 | Set the version to the next patch release number (in dev mode) | jszakmeister/trac-backlog,jszakmeister/trac-backlog | backlog/__init__.py | backlog/__init__.py | __version__ = (0, 2, 2, 'dev', 0)
def get_version():
version = '%d.%d.%d' % __version__[0:3]
if __version__[3]:
version = '%s-%s%s' % (version, __version__[3],
(__version__[4] and str(__version__[4])) or '')
return version
| __version__ = (0, 2, 1, '', 0)
def get_version():
version = '%d.%d.%d' % __version__[0:3]
if __version__[3]:
version = '%s-%s%s' % (version, __version__[3],
(__version__[4] and str(__version__[4])) or '')
return version
| bsd-3-clause | Python |
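A worked trace of the version-string logic above, derived directly from the code as shown:

# With __version__ = (0, 2, 2, 'dev', 0):
#   '%d.%d.%d' % __version__[0:3]  -> '0.2.2'
#   __version__[3] == 'dev' is truthy, and __version__[4] == 0 is falsy,
#   so the build-number suffix collapses to ''.
# get_version() therefore returns '0.2.2-dev'.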
b808784711242099d8fbf9f0f1c7d13ca5a5a1d7 | Bump the version to 0.3.2 | dmtucker/backlog | backlog/__init__.py | backlog/__init__.py | """A Simple Note Manager"""
from __future__ import absolute_import
from backlog.backlog import Backlog
__version__ = '0.3.2'
| """A Simple Note Manager"""
from __future__ import absolute_import
from backlog.backlog import Backlog
__version__ = '0.3.1'
| lgpl-2.1 | Python |
d0c6ae0dbb68fad31c5f3e51d934b8c7f5e8534f | Add ability to override issue JQL in runner | ianunruh/jira-zd-bridge | jzb/runner.py | jzb/runner.py | from argparse import ArgumentParser
import logging
import sys
import jira
from redis import StrictRedis
import yaml
import zendesk
from jzb import LOG
from jzb.bridge import Bridge
from jzb.util import objectize
def configure_logger(level):
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
LOG.addHandler(handler)
LOG.setLevel(level)
def main():
parser = ArgumentParser()
parser.add_argument('-c', '--config-file', default='config.yml')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-Q', '--query')
args = parser.parse_args()
if args.verbose:
configure_logger(logging.DEBUG)
else:
configure_logger(logging.INFO)
with open(args.config_file) as fp:
config = objectize(yaml.load(fp))
redis = StrictRedis(host=config.redis_host, port=config.redis_port)
jira_client = jira.JIRA(server=config.jira_url,
basic_auth=(config.jira_username, config.jira_password))
zd_client = zendesk.Client(url=config.zd_url,
username=config.zd_username,
password=config.zd_password)
bridge = Bridge(jira_client=jira_client,
zd_client=zd_client,
redis=redis,
config=config)
if args.query:
bridge.jira_issue_jql = args.query
bridge.sync()
if __name__ == '__main__':
main()
| from argparse import ArgumentParser
import logging
import sys
import jira
from redis import StrictRedis
import yaml
import zendesk
from jzb import LOG
from jzb.bridge import Bridge
from jzb.util import objectize
def configure_logger(level):
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
LOG.addHandler(handler)
LOG.setLevel(level)
def main():
parser = ArgumentParser()
parser.add_argument('-c', '--config-file', default='config.yml')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
if args.verbose:
configure_logger(logging.DEBUG)
else:
configure_logger(logging.INFO)
with open(args.config_file) as fp:
config = objectize(yaml.load(fp))
redis = StrictRedis(host=config.redis_host, port=config.redis_port)
jira_client = jira.JIRA(server=config.jira_url,
basic_auth=(config.jira_username, config.jira_password))
zd_client = zendesk.Client(url=config.zd_url,
username=config.zd_username,
password=config.zd_password)
bridge = Bridge(jira_client=jira_client,
zd_client=zd_client,
redis=redis,
config=config)
bridge.sync()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
122b0982d1e10aada383bbd373518d049e54b906 | Prepare for release 0.9pbs.107 | pbs/django-filer,pbs/django-filer,pbs/django-filer,pbs/django-filer,pbs/django-filer | filer/__init__.py | filer/__init__.py | #-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.107' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| #-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.107.dev1' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| bsd-3-clause | Python |