repo_name | path | copies | size | content | license
---|---|---|---|---|---
TorpedoXL/namebench | libnamebench/nameserver_list.py | 173 | 24485 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to work with bunches of nameservers."""
__author__ = '[email protected] (Thomas Stromberg)'
import datetime
import operator
import Queue
import random
import sys
import threading
import time
# 3rd party libraries
import dns.resolver
import conn_quality
import addr_util
import nameserver
import util
NS_CACHE_SLACK = 2
CACHE_VER = 4
PREFERRED_HEALTH_TIMEOUT_MULTIPLIER = 1.5
SYSTEM_HEALTH_TIMEOUT_MULTIPLIER = 2
TOO_DISTANT_MULTIPLIER = 4.75
DEFAULT_MAX_SERVERS_TO_CHECK = 350
# If we can't ping more than this, go into slowmode.
MIN_PINGABLE_PERCENT = 5
MIN_HEALTHY_PERCENT = 10
SLOW_MODE_THREAD_COUNT = 6
# Windows behaves in unfortunate ways if too many threads are specified
DEFAULT_THREAD_COUNT = 35
MAX_INITIAL_HEALTH_THREAD_COUNT = 35
class OutgoingUdpInterception(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TooFewNameservers(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ThreadFailure(Exception):
def __init__(self):
pass
class QueryThreads(threading.Thread):
"""Quickly see which nameservers are awake."""
def __init__(self, input_queue, results_queue, action_type, checks=None):
threading.Thread.__init__(self)
self.input = input_queue
self.action_type = action_type
self.results = results_queue
self.checks = checks
self.halt = False
def stop(self):
self.halt = True
def run(self):
"""Iterate over the queue, processing each item."""
while not self.halt and not self.input.empty():
# wildcard_check is special: its queue items are tuples of two nameservers
if self.action_type == 'wildcard_check':
try:
(ns, other_ns) = self.input.get_nowait()
except Queue.Empty:
return
if ns.is_disabled or other_ns.is_disabled:
self.results.put(None)
continue
else:
self.results.put((ns, ns.TestSharedCache(other_ns)))
# everything else only has a single nameserver.
else:
try:
ns = self.input.get_nowait()
except Queue.Empty:
return
if ns.is_disabled:
self.results.put(None)
continue
if self.action_type == 'ping':
self.results.put(ns.CheckHealth(fast_check=True))
elif self.action_type == 'health':
self.results.put(ns.CheckHealth(sanity_checks=self.checks))
elif self.action_type == 'final':
self.results.put(ns.CheckHealth(sanity_checks=self.checks, final_check=True))
elif self.action_type == 'port_behavior':
self.results.put(ns.CheckHealth(sanity_checks=self.checks, port_check=True))
elif self.action_type == 'censorship':
self.results.put(ns.CheckCensorship(self.checks))
elif self.action_type == 'store_wildcards':
self.results.put(ns.StoreWildcardCache())
elif self.action_type == 'node_id':
self.results.put(ns.UpdateNodeIds())
elif self.action_type == 'update_hostname':
self.results.put(ns.UpdateHostname())
else:
raise ValueError('Invalid action type: %s' % self.action_type)
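# Illustrative wiring sketch (not part of the original module; _LaunchQueryThreads
# below does the same thing with progress reporting and error handling):
#
#   input_queue, results_queue = Queue.Queue(), Queue.Queue()
#   for ns in nameservers:
#       input_queue.put(ns)
#   workers = [QueryThreads(input_queue, results_queue, 'ping') for _ in range(10)]
#   for worker in workers:
#       worker.start()
#   for worker in workers:
#       worker.join()
#   # results_queue now holds one CheckHealth() result (or None) per nameserver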
class NameServers(list):
def __init__(self, thread_count=DEFAULT_THREAD_COUNT, max_servers_to_check=DEFAULT_MAX_SERVERS_TO_CHECK):
self._ips = set()
self.thread_count = thread_count
super(NameServers, self).__init__()
self.client_latitude = None
self.client_longitude = None
self.client_country = None
self.client_domain = None
self.client_asn = None
self.max_servers_to_check = max_servers_to_check
@property
def visible_servers(self):
return [x for x in self if not x.is_hidden]
@property
def enabled_servers(self):
return [x for x in self.visible_servers if not x.is_disabled]
@property
def disabled_servers(self):
return [x for x in self.visible_servers if x.is_disabled]
@property
def enabled_keepers(self):
return [x for x in self.enabled_servers if x.is_keeper]
@property
def enabled_supplemental(self):
return [x for x in self.enabled_servers if not x.is_keeper]
@property
def supplemental_servers(self):
return [x for x in self if not x.is_keeper]
@property
def country_servers(self):
return [x for x in self if x.country_code == self.client_country]
# Return a list of servers that match a particular tag
def HasTag(self, tag):
return [x for x in self if x.HasTag(tag)]
# Return a list of visible servers that match a particular tag
def HasVisibleTag(self, tag):
return [x for x in self.visible_servers if x.HasTag(tag)]
def SortEnabledByFastest(self):
"""Return a list of healthy servers in fastest-first order."""
return sorted(self.enabled_servers, key=operator.attrgetter('check_average'))
def SortEnabledByNearest(self):
"""Return a list of healthy servers in fastest-first order."""
return sorted(self.enabled_servers, key=operator.attrgetter('fastest_check_duration'))
def msg(self, msg, count=None, total=None, **kwargs):
if self.status_callback:
self.status_callback(msg, count=count, total=total, **kwargs)
else:
print '%s [%s/%s]' % (msg, count, total)
def _GetObjectForIP(self, ip):
return [x for x in self if x.ip == ip][0]
def _MergeNameServerData(self, ns):
existing = self._GetObjectForIP(ns.ip)
existing.tags.update(ns.tags)
if ns.system_position is not None:
existing.system_position = ns.system_position
elif ns.dhcp_position is not None:
existing.dhcp_position = ns.dhcp_position
def append(self, ns):
"""Add a nameserver to the list, guaranteeing uniqueness."""
if ns.ip in self._ips:
self._MergeNameServerData(ns)
else:
super(NameServers, self).append(ns)
self._ips.add(ns.ip)
def SetTimeouts(self, timeout, ping_timeout, health_timeout):
if len(self.enabled_servers) > 1:
cq = conn_quality.ConnectionQuality(status_callback=self.status_callback)
(intercepted, avg_latency, max_latency) = cq.CheckConnectionQuality()[0:3]
if intercepted:
raise OutgoingUdpInterception(
'Your router or Internet Service Provider appears to be intercepting '
'and redirecting all outgoing DNS requests. This means you cannot '
'benchmark or utilize alternate DNS servers. Please adjust your '
'router configuration or file a support request with your ISP.'
)
if (max_latency * 2) > health_timeout:
health_timeout = max_latency * 2
self.msg('Set health timeout to %.2fs' % health_timeout)
if (max_latency * 1.1) > ping_timeout:
ping_timeout = avg_latency * 1.4
self.msg('Set ping timeout to %.2fs' % ping_timeout)
for ns in self:
ns.timeout = timeout
ns.ping_timeout = ping_timeout
ns.health_timeout = health_timeout
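# Worked example (illustrative): with avg_latency=0.08s and max_latency=0.30s,
# a health_timeout below 0.60s is raised to 0.60s (max_latency * 2), and a
# ping_timeout below 0.33s (max_latency * 1.1) is reset to 0.112s
# (avg_latency * 1.4) before both values are applied to every nameserver.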
def SetClientLocation(self, latitude, longitude, client_country):
self.client_latitude = latitude
self.client_longitude = longitude
self.client_country = client_country
def SetNetworkLocation(self, domain, asn):
self.client_domain = domain
self.client_asn = asn
def FilterByTag(self, include_tags=None, require_tags=None):
for ns in self:
if include_tags:
if not ns.MatchesTags(include_tags):
ns.tags.add('hidden')
if require_tags:
for tag in require_tags:
if not ns.HasTag(tag):
ns.tags.add('hidden')
if not self.enabled_servers:
raise TooFewNameservers('No nameservers specified matched tags %s %s' % (include_tags, require_tags))
if require_tags:
self.msg("%s of %s nameservers have tags: %s (%s required)" %
(len(self.visible_servers), len(self), ', '.join(include_tags),
', '.join(require_tags)))
else:
self.msg("%s of %s nameservers have tags: %s" %
(len(self.visible_servers), len(self), ', '.join(include_tags)))
def HasEnoughInCountryServers(self):
return len(self.country_servers) > self.max_servers_to_check
def NearbyServers(self, max_distance):
srv_by_dist = sorted([(x.DistanceFromCoordinates(self.client_latitude, self.client_longitude), x)
for x in self.HasVisibleTag('regional')], key=operator.itemgetter(0))
for distance, ns in srv_by_dist:
if distance < float(max_distance):
yield ns
def AddNetworkTags(self):
"""Add network tags for each nameserver."""
if self.client_domain:
provider = self.client_domain.split('.')[0]
else:
provider = None
for ns in self:
ns.AddNetworkTags(self.client_domain, provider, self.client_asn, self.client_country)
def AddLocalityTags(self, max_distance):
if self.client_latitude:
count = 0
for ns in self.NearbyServers(max_distance):
count += 1
if count > self.max_servers_to_check:
break
ns.tags.add('nearby')
def DisableSlowestSupplementalServers(self, multiplier=TOO_DISTANT_MULTIPLIER, max_servers=None,
prefer_asn=None):
"""Disable servers who's fastest duration is multiplier * average of best 10 servers."""
if not max_servers:
max_servers = self.max_servers_to_check
supplemental_servers = self.enabled_supplemental
fastest = [x for x in self.SortEnabledByFastest()][:10]
best_10 = util.CalculateListAverage([x.fastest_check_duration for x in fastest])
cutoff = best_10 * multiplier
self.msg("Removing secondary nameservers slower than %0.2fms (max=%s)" % (cutoff, max_servers))
for (idx, ns) in enumerate(self.SortEnabledByFastest()):
hide = False
if ns not in supplemental_servers:
continue
if ns.fastest_check_duration > cutoff:
hide = True
if idx > max_servers:
hide = True
if hide:
matches = ns.MatchesTags(nameserver.PROVIDER_TAGS)
if matches:
self.msg("%s seems slow, but has tag: %s" % (ns, matches))
else:
ns.tags.add('hidden')
def _FastestByLocalProvider(self):
"""Find the fastest DNS server by the client provider."""
fastest = self.SortEnabledByFastest()
# Give preference in tag order
for tag in nameserver.PROVIDER_TAGS:
for ns in fastest:
if ns.HasTag(tag):
return ns
def HideBrokenIPV6Servers(self):
"""Most people don't care about these."""
for ns in self.disabled_servers:
if ns.HasTag('ipv6') and not ns.is_hidden:
ns.tags.add('hidden')
def HideSlowSupplementalServers(self, target_count):
"""Given a target count, delete nameservers that we do not plan to test."""
# Magic secondary mixing algorithm:
# - Half of them should be the "nearest" nameservers
# - Half of them should be the "fastest average" nameservers
self.msg("Hiding all but %s servers" % target_count)
keepers = self.enabled_keepers
isp_keeper = self._FastestByLocalProvider()
if isp_keeper:
self.msg("%s is the fastest DNS server provided by your ISP." % isp_keeper)
keepers.append(isp_keeper)
supplemental_servers_needed = target_count - len(keepers)
if supplemental_servers_needed < 1 or not self.enabled_supplemental:
return
nearest_needed = int(supplemental_servers_needed / 2.0)
if supplemental_servers_needed < 50:
self.msg("Picking %s secondary servers to use (%s nearest, %s fastest)" %
(supplemental_servers_needed, nearest_needed, supplemental_servers_needed - nearest_needed))
# Phase two is picking the nearest secondary server
supplemental_servers_to_keep = []
for ns in self.SortEnabledByNearest():
if ns not in keepers:
if not supplemental_servers_to_keep and supplemental_servers_needed < 15:
self.msg('%s appears to be the nearest regional (%0.2fms)' % (ns, ns.fastest_check_duration))
supplemental_servers_to_keep.append(ns)
if len(supplemental_servers_to_keep) >= nearest_needed:
break
# Phase three is hiding the slower secondary servers
for ns in self.SortEnabledByFastest():
if ns not in keepers and ns not in supplemental_servers_to_keep:
supplemental_servers_to_keep.append(ns)
if len(supplemental_servers_to_keep) >= supplemental_servers_needed:
break
for ns in self.supplemental_servers:
if ns not in supplemental_servers_to_keep and ns not in keepers:
ns.tags.add('hidden')
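# Worked example (illustrative): with target_count=20 and 5 keepers (including
# the fastest ISP server), supplemental_servers_needed is 15, nearest_needed is
# int(15 / 2.0) = 7, so the 7 nearest and then the 8 fastest remaining servers
# are kept; every other supplemental server gets the 'hidden' tag.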
def CheckHealth(self, sanity_checks=None, max_servers=11, prefer_asn=None):
"""Filter out unhealthy or slow replica servers."""
self.PingNameServers()
if len(self.enabled_servers) > max_servers:
self.DisableSlowestSupplementalServers(prefer_asn=prefer_asn)
self.RunHealthCheckThreads(sanity_checks['primary'])
if len(self.enabled_servers) > max_servers:
self._DemoteSecondaryGlobalNameServers()
self.HideSlowSupplementalServers(int(max_servers * NS_CACHE_SLACK))
if len(self.enabled_servers) > 1:
self.RunNodeIdThreads()
self.CheckCacheCollusion()
self.RunNodeIdThreads()
self.HideSlowSupplementalServers(max_servers)
self.RunFinalHealthCheckThreads(sanity_checks['secondary'])
self.RunNodeIdThreads()
self.HideBrokenIPV6Servers()
# One more time!
if len(self.enabled_servers) > 1:
self.RunNodeIdThreads()
self.CheckCacheCollusion()
self.RunHostnameThreads()
if not self.enabled_servers:
raise TooFewNameservers('None of the nameservers tested are healthy')
def CheckCensorship(self, sanity_checks):
pass
def _RemoveGlobalWarnings(self):
"""If all nameservers have the same warning, remove it. It's likely false."""
ns_count = len(self.enabled_servers)
seen_counts = {}
# No sense in checking for duplicate warnings if we only have one server.
if len(self.enabled_servers) == 1:
return
for ns in self.enabled_servers:
for warning in ns.warnings:
seen_counts[warning] = seen_counts.get(warning, 0) + 1
for warning in seen_counts:
if seen_counts[warning] == ns_count:
self.msg('All nameservers have warning: %s (likely a false positive)' % warning)
for ns in self.enabled_servers:
ns.warnings.remove(warning)
def _DemoteSecondaryGlobalNameServers(self):
"""For global nameservers, demote the slower IP to secondary status."""
seen = {}
for ns in self.SortEnabledByFastest():
if ns.MatchesTags(['preferred', 'global']):
if ns.provider in seen and not ns.MatchesTags(['system', 'specified']):
faster_ns = seen[ns.provider]
if ns.HasTag('preferred'):
self.msg('Making %s the primary anycast - faster than %s by %2.2fms' %
(faster_ns.name_and_node, ns.name_and_node, ns.check_average - faster_ns.check_average))
ns.tags.add('hidden')
else:
seen[ns.provider] = ns
def ResetTestResults(self):
"""Reset the testng status of all disabled hosts."""
return [ns.ResetTestStatus() for ns in self]
def CheckCacheCollusion(self):
"""Mark if any nameservers share cache, especially if they are slower."""
self.RunWildcardStoreThreads()
sleepy_time = 4
self.msg("Waiting %ss for TTL's to decrement." % sleepy_time)
time.sleep(sleepy_time)
test_combos = []
good_nameservers = [x for x in self.SortEnabledByFastest()]
for ns in good_nameservers:
for compare_ns in good_nameservers:
if ns != compare_ns:
test_combos.append((compare_ns, ns))
results = self.RunCacheCollusionThreads(test_combos)
while not results.empty():
(ns, shared_ns) = results.get()
if shared_ns:
ns.shared_with.add(shared_ns)
shared_ns.shared_with.add(ns)
if ns.is_disabled or shared_ns.is_disabled:
continue
if ns.check_average > shared_ns.check_average:
slower = ns
faster = shared_ns
else:
slower = shared_ns
faster = ns
if slower.system_position == 0:
faster.DisableWithMessage('Shares-cache with current primary DNS server')
slower.warnings.add('Replica of %s' % faster.ip)
elif slower.is_keeper and not faster.is_keeper:
faster.DisableWithMessage('Replica of %s [%s]' % (slower.name, slower.ip))
slower.warnings.add('Replica of %s [%s]' % (faster.name, faster.ip))
else:
diff = slower.check_average - faster.check_average
self.msg("Disabling %s - slower replica of %s by %0.1fms." % (slower.name_and_node, faster.name_and_node, diff))
slower.DisableWithMessage('Slower replica of %s [%s]' % (faster.name, faster.ip))
faster.warnings.add('Replica of %s [%s]' % (slower.name, slower.ip))
def _LaunchQueryThreads(self, action_type, status_message, items,
thread_count=None, **kwargs):
"""Launch query threads for a given action type.
Args:
action_type: a string describing an action type to pass
status_message: Status to show during updates.
items: A list of items to pass to the queue
thread_count: How many threads to use (int)
kwargs: Arguments to pass to QueryThreads()
Returns:
results_queue: Results from the query tests.
Raises:
TooFewNameservers: If no tested nameservers are healthy.
"""
threads = []
input_queue = Queue.Queue()
results_queue = Queue.Queue()
# items are usually nameservers
random.shuffle(items)
for item in items:
input_queue.put(item)
if not thread_count:
thread_count = self.thread_count
if thread_count > len(items):
thread_count = len(items)
status_message += ' (%s threads)' % thread_count
self.msg(status_message, count=0, total=len(items))
for _ in range(0, thread_count):
thread = QueryThreads(input_queue, results_queue, action_type, **kwargs)
try:
thread.start()
except:
self.msg("ThreadingError with %s threads: waiting for completion before retrying." % thread_count)
for thread in threads:
thread.stop()
thread.join()
raise ThreadFailure()
threads.append(thread)
while results_queue.qsize() != len(items):
self.msg(status_message, count=results_queue.qsize(), total=len(items))
time.sleep(0.5)
self.msg(status_message, count=results_queue.qsize(), total=len(items))
for thread in threads:
thread.join()
if not self.enabled_servers:
raise TooFewNameservers('None of the %s nameservers tested are healthy' % len(self.visible_servers))
return results_queue
def RunCacheCollusionThreads(self, test_combos):
"""Schedule and manage threading for cache collusion checks."""
return self._LaunchQueryThreads('wildcard_check', 'Running cache-sharing checks on %s servers' % len(self.enabled_servers), test_combos)
def PingNameServers(self):
"""Quickly ping nameservers to see which are available."""
start = datetime.datetime.now()
test_servers = list(self.enabled_servers)
try:
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', test_servers)
except ThreadFailure:
self.msg("It looks like you couldn't handle %s threads, trying again with %s (slow)" % (self.thread_count, SLOW_MODE_THREAD_COUNT))
self.thread_count = SLOW_MODE_THREAD_COUNT
self.ResetTestResults()
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', test_servers)
success_rate = self.GetHealthyPercentage(compare_to=test_servers)
if success_rate < MIN_PINGABLE_PERCENT:
self.msg('How odd! Only %0.1f percent of name servers were pingable. Trying again with %s threads (slow)'
% (success_rate, SLOW_MODE_THREAD_COUNT))
self.ResetTestResults()
self.thread_count = SLOW_MODE_THREAD_COUNT
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', test_servers)
if self.enabled_servers:
self.msg('%s of %s servers are available (duration: %s)' %
(len(self.enabled_servers), len(test_servers), datetime.datetime.now() - start))
return results
def GetHealthyPercentage(self, compare_to=None):
if not compare_to:
compare_to = self.visible_servers
return (float(len(self.enabled_servers)) / float(len(compare_to))) * 100
def RunHealthCheckThreads(self, checks, min_healthy_percent=MIN_HEALTHY_PERCENT):
"""Quickly ping nameservers to see which are healthy."""
test_servers = self.enabled_servers
status_msg = 'Running initial health checks on %s servers' % len(test_servers)
if self.thread_count > MAX_INITIAL_HEALTH_THREAD_COUNT:
thread_count = MAX_INITIAL_HEALTH_THREAD_COUNT
else:
thread_count = self.thread_count
try:
results = self._LaunchQueryThreads('health', status_msg, test_servers,
checks=checks, thread_count=thread_count)
except ThreadFailure:
self.msg("It looks like you couldn't handle %s threads, trying again with %s (slow)" % (thread_count, SLOW_MODE_THREAD_COUNT))
self.thread_count = SLOW_MODE_THREAD_COUNT
self.ResetTestResults()
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', list(self.visible_servers))
success_rate = self.GetHealthyPercentage(compare_to=test_servers)
if success_rate < min_healthy_percent:
self.msg('How odd! Only %0.1f percent of name servers are healthy. Trying again with %s threads (slow)'
% (success_rate, SLOW_MODE_THREAD_COUNT))
self.ResetTestResults()
self.thread_count = SLOW_MODE_THREAD_COUNT
time.sleep(5)
results = self._LaunchQueryThreads('health', status_msg, test_servers,
checks=checks, thread_count=thread_count)
self.msg('%s of %s tested name servers are healthy' %
(len(self.enabled_servers), len(test_servers)))
return results
def RunNodeIdThreads(self):
"""Update node id status on all servers."""
status_msg = 'Checking node ids on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('node_id', status_msg, list(self.enabled_servers))
def RunHostnameThreads(self):
"""Update node id status on all servers."""
status_msg = 'Updating hostnames on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('update_hostname', status_msg, list(self.enabled_servers))
def RunFinalHealthCheckThreads(self, checks):
"""Quickly ping nameservers to see which are healthy."""
status_msg = 'Running final health checks on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('final', status_msg, list(self.enabled_servers), checks=checks)
def RunCensorshipCheckThreads(self, checks):
"""Quickly ping nameservers to see which are healthy."""
status_msg = 'Running censorship checks on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('censorship', status_msg, list(self.enabled_servers), checks=checks)
def RunPortBehaviorThreads(self):
"""Get port behavior data."""
status_msg = 'Running port behavior checks on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('port_behavior', status_msg, list(self.enabled_servers))
def RunWildcardStoreThreads(self):
"""Store a wildcard cache value for all nameservers (using threads)."""
status_msg = 'Waiting for wildcard cache queries from %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('store_wildcards', status_msg, list(self.enabled_servers))
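# Illustrative end-to-end sketch (assumed usage; the NameServer constructor
# signature and the sanity_checks layout are assumptions, not taken from this file):
#
#   servers = NameServers(thread_count=10)
#   servers.append(nameserver.NameServer('8.8.8.8', name='Google Public DNS'))
#   servers.append(nameserver.NameServer('208.67.222.222', name='OpenDNS'))
#   servers.CheckHealth(sanity_checks={'primary': [], 'secondary': []})
#   for ns in servers.SortEnabledByFastest():
#       print '%s %.2fms' % (ns.ip, ns.check_average)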
| apache-2.0 |
yesbox/ansible | test/units/module_utils/basic/test_known_hosts.py | 74 | 2365 | # -*- coding: utf-8 -*-
# (c) 2015, Michael Scherer <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.compat.tests import unittest
from ansible.module_utils import known_hosts
class TestAnsibleModuleKnownHosts(unittest.TestCase):
urls = {
'ssh://one.example.org/example.git':
{'is_ssh_url': True, 'get_fqdn': 'one.example.org'},
'ssh+git://two.example.org/example.git':
{'is_ssh_url': True, 'get_fqdn': 'two.example.org'},
'rsync://three.example.org/user/example.git':
{'is_ssh_url': False, 'get_fqdn': 'three.example.org'},
'[email protected]:user/example.git':
{'is_ssh_url': True, 'get_fqdn': 'four.example.org'},
'git+ssh://five.example.org/example.git':
{'is_ssh_url': True, 'get_fqdn': 'five.example.org'},
'ssh://six.example.org:21/example.org':
{'is_ssh_url': True, 'get_fqdn': 'six.example.org'},
'ssh://[2001:DB8::abcd:abcd]/example.git':
{'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
'ssh://[2001:DB8::abcd:abcd]:22/example.git':
{'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
'username@[2001:DB8::abcd:abcd]/example.git':
{'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
'username@[2001:DB8::abcd:abcd]:22/example.git':
{'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
}
def test_is_ssh_url(self):
for u in self.urls:
self.assertEqual(known_hosts.is_ssh_url(u), self.urls[u]['is_ssh_url'])
def test_get_fqdn(self):
for u in self.urls:
self.assertEqual(known_hosts.get_fqdn(u), self.urls[u]['get_fqdn'])
| gpl-3.0 |
NYU-DevOps-Fall2017-PromotionsTeam/promotions | features/steps/promotion_steps.py | 1 | 7975 | from os import getenv
import requests
from behave import *
import json
from app import server
from verify import expect
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
WAIT_SECONDS = 30
BASE_URL = getenv('BASE_URL', 'http://localhost:5001/')
#BASE_URL = getenv('BASE_URL', '/')
#########################################
# GIVEN STATEMENTS #
#########################################
@given(u'the following promotions')
def step_impl(context):
server.data_reset()
for row in context.table:
server.data_load(
row['id'],
{
"name": row['name'],
"promo_type": row['promo_type'],
"value": float(row['value']),
"start_date": row['start_date'],
"end_date": row['end_date'],
"detail": row['detail']
}
)
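# Illustrative background table (column layout inferred from the keys used
# above, not copied from a feature file):
#   | id | name    | promo_type | value | start_date | end_date   | detail |
#   | 1  | Default | dollar     | 5.0   | 2017-01-01 | 2017-12-31 | n/a    |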
#########################################
# WHEN STATEMENTS #
#########################################
@when(u'I visit "{page}"')
def step_impl(context, page):
# print("Targer URL", BASE_URL +'{}'.format(page))
context.resp = context.app.get('http://localhost:5001/' +'{}'.format(page))
@when(u'I visit the root url')
def step_impl(context):
context.driver.get(context.base_url)
print(context.driver.current_url)
@when(u'I press the "{button}" button')
def step_impl(context, button):
button_id = button.lower() + '-btn'
context.driver.find_element_by_id(button_id).click()
@when(u'I set the "{element_name}" to "{text_string}"')
def step_impl(context, element_name, text_string):
element_id = 'promo_' + element_name.lower()
element = context.driver.find_element_by_id(element_id)
element.clear()
element.send_keys(text_string)
@when(u'I change field "{element_name}" to "{text_string}"')
def step_impl(context, element_name, text_string):
element_id = 'promo_' + element_name.lower()
#element = context.driver.find_element_by_id(element_id)
element = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.presence_of_element_located((By.ID, element_id))
)
element.clear()
element.send_keys(text_string)
@when(u'I retrieve "{url}" with id "{id}"')
def step_impl(context, url, id):
target_url = '{}/{}'.format(url, id)
context.resp = context.app.get('http://localhost:5001/' + target_url)
context.data = json.loads(context.resp.data.decode('utf-8'))
assert isinstance(context.data, dict)
@when(u'I update "{url}" with id "{id}"')
def step_impl(context, url, id):
target_url = '{}/{}'.format(url, id)
headers = {'content-type': 'application/json'}
data = json.dumps(context.data)
context.resp = context.app.put('http://localhost:5001/' + target_url, data=data, headers=headers)
assert context.resp.status_code == 200
@when(u'I change "{key}" to "{value}"')
def step_impl(context, key, value):
if key == 'value':
value = float(value)
context.data[key] = value
@when(u'I delete "{url}" with id "{id}"')
def step_impl(context, url, id):
target_url = '{}/{}'.format(url, id)
context.resp = context.app.delete('http://localhost:5001/' + target_url)
assert context.resp.status_code == 204
@when(u'I create a promotion')
def step_impl(context):
target_url = 'promotions'
headers = {'content-type': 'application/json'}
data=json.dumps({})
context.resp = context.app.post('http://localhost:5001/' + target_url, data=data, headers=headers)
@when(u'I call POST with Incorrect content-type')
def step_impl(context):
target_url = 'promotions'
#headers = {'content-type': 'application/json'}
headers = {'content-type': 'not_application/json'}
data=json.dumps({})
context.resp = context.app.post('http://localhost:5001/' + target_url, data=data, headers=headers)
@when(u'I send a PUT request to \'/promotions/delete-all\'')
def step_impl(context):
target_url = 'promotions/delete-all'
context.resp = context.app.put('http://localhost:5001/' + target_url)
#########################################
# THEN STATEMENTS #
#########################################
@then(u'I should see "{message}" in the title')
def step_impl(context, message):
""" Check the document title for a message """
expect(context.driver.title).to_contain(message)
@then(u'I should get a response code "{code}"')
def step_impl(context, code):
code = int(code)
assert context.resp.status_code == code
@then(u'There should be "{count}" promotions')
def step_impl(context, count):
count = int(count)
data = json.loads(context.resp.data.decode('utf-8'))
if isinstance(data, list):
assert len(data) == count
else:
assert isinstance(data, dict)
@then(u'I should see "{promo_name}"')
def step_impl(context, promo_name):
data = json.loads(context.resp.data.decode('utf-8'))
if isinstance(data, list):
names = [promo['name'] for promo in data]
assert promo_name in names
else:
assert data['name'] == promo_name
@then(u'I should not see "{promo_name}"')
def step_impl(context, promo_name):
data = json.loads(context.resp.data.decode('utf-8'))
if isinstance(data, list):
names = [promo['name'] for promo in data]
assert promo_name not in names
else:
assert data['name'] != promo_name
@then(u'I will see "{promo_name}" with "{key}" as "{value}"')
def step_impl(context, promo_name, key, value):
data = json.loads(context.resp.data.decode('utf-8'))
if key == 'value':
value = float(value)
if isinstance(data, list):
for promo in data:
if promo['name'] == promo_name:
assert promo[key] == value
break
else:
assert data[key] == value
@then(u'I will not see a promotion with "{key}" as "{value}"')
def step_impl(context, key, value):
data = json.loads(context.resp.data.decode('utf-8'))
if key == 'value':
value = float(value)
if isinstance(data, list):
for promo in data:
assert promo[key] != value
else:
assert data[key] != value
@then(u'I reset the server db for further tests')
def step_impl(context):
server.data_reset()
@then(u'I should see the message "{message}"')
def step_impl(context, message):
#element = context.driver.find_element_by_id('flash_message')
#expect(element.text).to_contain(message)
found = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element(
(By.ID, 'flash_message'),
message
)
)
expect(found).to_be(True)
@then(u'I should see "{name}" in the results')
def step_impl(context, name):
#element = context.driver.find_element_by_id('search_results')
#expect(element.text).to_contain(name)
found = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element(
(By.ID, 'search_results'),
name
)
)
expect(found).to_be(True)
@then(u'I should not see "{name}" in the results')
def step_impl(context, name):
element = context.driver.find_element_by_id('search_results')
error_msg = "I should not see '%s' in '%s'" % (name, element.text)
expect(element.text).to_not_contain(name)
@then(u'I should see "{text_string}" in the "{element_name}" field')
def step_impl(context, text_string, element_name):
element_id = 'promo_' + element_name.lower()
#element = context.driver.find_element_by_id(element_id)
found = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element_value(
(By.ID, element_id),
text_string
)
)
#expect(element.get_attribute('value')).to_equal(text_string)
expect(found).to_be(True) | apache-2.0 |
wuyuewen/libcloud | libcloud/test/common/test_gandi.py | 66 | 1293 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.utils.py3 import xmlrpclib
from libcloud.test import MockHttp
class BaseGandiMockHttp(MockHttp):
def _get_method_name(self, type, use_param, qs, path):
return "_xmlrpc"
def _xmlrpc(self, method, url, body, headers):
params, methodName = xmlrpclib.loads(body)
meth_name = '_xmlrpc__' + methodName.replace('.', '_')
if self.type:
meth_name = '%s_%s' % (meth_name, self.type)
return getattr(self, meth_name)(method, url, body, headers)
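# Dispatch sketch (illustrative; the method name is hypothetical): a subclass
# answering the XML-RPC method "hosting.vm.list" would implement a handler named
# "_xmlrpc__hosting_vm_list"; when self.type is set (e.g. 'ERROR'), the lookup
# becomes "_xmlrpc__hosting_vm_list_ERROR".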
| apache-2.0 |
erikge/watch_gyp | test/generator-output/gyptest-rules.py | 19 | 1839 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --generator-output= behavior when using rules.
"""
import TestGyp
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])
test.writable(test.workpath('rules'), False)
test.run_gyp('rules.gyp',
'--generator-output=' + test.workpath('gypfiles'),
'-G', 'xcode_ninja_target_pattern=^pull_in_all_actions$',
chdir='rules')
test.writable(test.workpath('rules'), True)
test.relocate('rules', 'relocate/rules')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/rules'), False)
test.writable(test.workpath('relocate/rules/build'), True)
test.writable(test.workpath('relocate/rules/subdir1/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/rules-out'), True)
test.build('rules.gyp', test.ALL, chdir='relocate/gypfiles')
expect = """\
Hello from program.c
Hello from function1.in1
Hello from function2.in1
Hello from define3.in0
Hello from define4.in0
"""
if test.format == 'xcode':
chdir = 'relocate/rules/subdir1'
else:
chdir = 'relocate/gypfiles'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/rules/subdir2/rules-out/file1.out',
"Hello from file1.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file2.out',
"Hello from file2.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file3.out',
"Hello from file3.in1\n")
test.must_match('relocate/rules/subdir2/rules-out/file4.out',
"Hello from file4.in1\n")
test.pass_test()
| bsd-3-clause |
googleapis/python-compute | google/cloud/compute_v1/services/region_notification_endpoints/transports/__init__.py | 1 | 1104 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import RegionNotificationEndpointsTransport
from .rest import RegionNotificationEndpointsRestTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[RegionNotificationEndpointsTransport]]
_transport_registry["rest"] = RegionNotificationEndpointsRestTransport
__all__ = (
"RegionNotificationEndpointsTransport",
"RegionNotificationEndpointsRestTransport",
)
| apache-2.0 |
Mappy/PyLR | pylr/parser.py | 1 | 15704 | # -*- coding: utf-8 -*-
''' Location parser
.. moduleauthor:: David Marteau <[email protected]>
'''
from collections import namedtuple
from bitstring import BitStream
from .utils import lazyproperty
from .constants import (LATEST_BINARY_VERSION,
BINARY_VERSION_2,
MIN_BYTES_LINE_LOCATION,
MIN_BYTES_POINT_LOCATION,
MIN_BYTES_CLOSED_LINE_LOCATION,
MIN_BYTES_POLYGON,
RELATIVE_COORD_SIZE,
IS_POINT,
HAS_ATTRIBUTES,
GEOCOORD_SIZE,
POINT_ALONG_LINE_SIZE,
POINT_WITH_ACCESS_SIZE,
POINT_OFFSET_SIZE,
AREA_CODE_CIRCLE,
AREA_CODE_RECTANGLE,
AREA_CODE_POLYGON,
RECTANGLE_SIZE,
LARGE_RECTANGLE_SIZE,
GRID_SIZE,
LARGE_GRID_SIZE,
LRP_SIZE,
CIRCLE_BASE_SIZE,
LocationType)
class BinaryParseError(Exception):
pass
class BinaryVersionError(BinaryParseError):
pass
class InvalidDataSizeError(BinaryParseError):
pass
# The Constant RFU (Reserved for Future Use)
RFU_BITS = 'uint:1'
# number of bits used for attributes flag
ATTR_FLAG_BITS = 'uint:1'
# number of bits used for point flag
POINT_FLAG_BITS = 'uint:1'
# number of bits used for version
VERSION_BITS = 'uint:3'
AREA_FLAG_BIT0 = 'uint:1'
AREA_FLAG_BIT1 = 'uint:1'
HEADER_BITS = (RFU_BITS,
ATTR_FLAG_BITS,
POINT_FLAG_BITS,
AREA_FLAG_BIT1,
POINT_FLAG_BITS,
VERSION_BITS)
_BinaryHeader = namedtuple('_BinaryHeader', ('arf', 'af', 'pf', 'ver'))
class _RawBinaryData(object):
""" Hold a location reference description as a bit stream."""
MIN_VERSION = BINARY_VERSION_2
MAX_VERSION = LATEST_BINARY_VERSION
def __init__(self, data, base64=False):
""" Constructor.
:param string data: Binary data
:param bool base64: True if data is coded in base64
"""
if base64:
data = data.decode("base64")
#: raw data size
self._sz = len(data)
#: bit stream used to read data
self._bs = BitStream(bytes=data)
def getbits(self, *bits):
""" Read the given numbers of bits.
:param tuple bits: Tuple of number of bits to read
:returns: Tuple of bit fields
:rtype: tuple
"""
return tuple(self._bs.read(v) for v in bits)
def get_position(self):
""" Returns position in the bit stream.
:returns: Position in the bit stream
:rtype: int
"""
return self._bs.pos
@property
def num_bytes(self):
""" Size of the decoded data.
:returns: Size of the decoded data.
:rtype: int
"""
return self._sz
@property
def version(self):
""" Return binary version of the data
:returns: Binary version of the data.
:rtype: int
"""
return self.header.ver
@lazyproperty
def header(self):
""" Parse header (once) location type
:returns: Header data
:rtype: _BinaryHeader
"""
# Validate data size
if self._sz < min(MIN_BYTES_LINE_LOCATION,
MIN_BYTES_POINT_LOCATION,
MIN_BYTES_CLOSED_LINE_LOCATION):
raise InvalidDataSizeError("not enough bytes in data")
_, arf1, pf, arf0, af, ver = self.getbits(*HEADER_BITS)
arf = 2 * arf1 + arf0
return _BinaryHeader(arf, af, pf, ver)
@lazyproperty
def location_type(self):
""" Parse location type (once)
:returns: Location type
:rtype: LocationType
"""
header = self.header
# Check version
if not self.MIN_VERSION <= header.ver <= self.MAX_VERSION:
raise BinaryVersionError("Invalid binary version {}".format(header.ver))
is_point = (header.pf == IS_POINT)
has_attributes = (header.af == HAS_ATTRIBUTES)
area_code = header.arf
is_area = ((area_code == 0 and not is_point and not has_attributes) or area_code > 0)
total_bytes = self._sz
loc_type = LocationType.UNKNOWN
if not is_point and not is_area and has_attributes:
loc_type = LocationType.LINE_LOCATION
elif is_point and not is_area:
if not has_attributes:
if total_bytes == GEOCOORD_SIZE:
loc_type = LocationType.GEO_COORDINATES
else:
raise InvalidDataSizeError("Invalid byte size")
else:
if total_bytes == POINT_ALONG_LINE_SIZE or total_bytes == (POINT_ALONG_LINE_SIZE + POINT_OFFSET_SIZE):
loc_type = LocationType.POINT_ALONG_LINE
elif total_bytes == POINT_WITH_ACCESS_SIZE or total_bytes == (POINT_WITH_ACCESS_SIZE + POINT_OFFSET_SIZE):
loc_type = LocationType.POI_WITH_ACCESS_POINT
else:
raise InvalidDataSizeError("Invalid byte size")
elif is_area and not is_point and has_attributes:
if total_bytes >= MIN_BYTES_CLOSED_LINE_LOCATION:
loc_type = LocationType.CLOSED_LINE
else:
raise InvalidDataSizeError("Invalid byte size")
else:
if area_code == AREA_CODE_CIRCLE:
loc_type = LocationType.CIRCLE
elif area_code == AREA_CODE_RECTANGLE:
# includes case AREA_CODE_GRID
if total_bytes == RECTANGLE_SIZE or total_bytes == LARGE_RECTANGLE_SIZE:
loc_type = LocationType.RECTANGLE
elif total_bytes == GRID_SIZE or total_bytes == LARGE_GRID_SIZE:
loc_type = LocationType.GRID
else:
raise InvalidDataSizeError("Invalid byte size")
elif area_code == AREA_CODE_POLYGON:
if not has_attributes and total_bytes >= MIN_BYTES_POLYGON:
loc_type = LocationType.POLYGON
else:
raise InvalidDataSizeError("Invalid byte size")
else:
raise BinaryParseError('Invalid header')
return loc_type
def init_binary_parsing(data, base64=False):
""" Create an instance of _RawBinaryData
The returned object can be passed to 'parse_binary'
:param string data: string describing the location
:param bool base64: True if encoded in base 64
:returns: Parsable data structure
:rtype: _RawBinaryData
"""
return _RawBinaryData(data, base64)
def parse_binary(data, base64=False):
""" Parse binary data.
Input is original data or an object returned by init_binary_parsing(...)
:param data: string (encoded or not) describing the location
:param bool base64: True if encoded in base 64
:returns: Object describing the parsed location, or an error object
"""
if not isinstance(data, _RawBinaryData):
data = _RawBinaryData(data, base64)
# Get header
loc_type = data.location_type
if loc_type == LocationType.LINE_LOCATION:
return parse_line(data)
elif loc_type == LocationType.POINT_ALONG_LINE:
return parse_point_along_line(data)
elif loc_type == LocationType.GEO_COORDINATES:
return parse_geo_coordinates(data)
elif loc_type == LocationType.POI_WITH_ACCESS_POINT:
return parse_poi_with_access_point(data)
elif loc_type == LocationType.RECTANGLE:
return parse_rectangle(data)
elif loc_type == LocationType.CLOSED_LINE:
return parse_closed_line(data)
elif loc_type == LocationType.CIRCLE:
return parse_circle(data)
elif loc_type == LocationType.GRID:
return parse_grid(data)
elif loc_type == LocationType.POLYGON:
return parse_polygon(data)
else:
return BinaryParseError("Invalid location type")
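# Illustrative usage sketch (assumed packaging: the package imports as pylr;
# the base64 string below is a placeholder, not a real location reference):
#
#   from pylr import parser
#   loc = parser.parse_binary('<base64-encoded OpenLR string>', base64=True)
#   if loc.type == parser.LocationType.LINE_LOCATION:
#       print(loc.flrp, loc.llrp, loc.points, loc.poffs, loc.noffs)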
# ----------------
# Location parsers
# ----------------
HEAD_FIELDS = ('version', 'type')
from .binary import (_parse_first_lrp,
_parse_intermediate_lrp,
_parse_last_line_lrp,
_parse_last_closed_line_attrs,
_parse_offset,
_parse_relative_coordinates,
_parse_absolute_coordinates,
_parse_radius,
_parse_grid_dimensions)
# LINE_LOCATION
LineLocation = namedtuple('LineLocation', HEAD_FIELDS+('flrp', 'llrp', 'points', 'poffs', 'noffs'))
""" Line Location type
"""
def parse_line(rb):
""" Parse line location
:param _RawBinaryData rb: Binary data describing the location
:returns: Line location
:rtype: LineLocation
"""
assert rb.location_type == LocationType.LINE_LOCATION
# number of intermediates points
num_intermediates = (rb.num_bytes - MIN_BYTES_LINE_LOCATION) / LRP_SIZE
flrp = _parse_first_lrp(rb)
points = []
rel = flrp
for _ in range(num_intermediates):
ilrp = _parse_intermediate_lrp(rb, rel)
points.append(ilrp)
rel = ilrp
llrp, pofff, nofff = _parse_last_line_lrp(rb, rel)
poffs = _parse_offset(rb) if pofff else 0
noffs = _parse_offset(rb) if nofff else 0
return LineLocation(rb.version, rb.location_type, flrp, llrp, points, poffs, noffs)
# POINT_ALONG_LINE
PointAlongLineLocation = namedtuple('PointAlongLineLocation', HEAD_FIELDS+('flrp', 'llrp', 'poffs'))
""" Point along location type
"""
def parse_point_along_line(rb):
""" Parse point along line location
:param _RawBinaryData rb: Binary data describing the location
:returns: Point along line location
:rtype: PointAlongLineLocation
"""
assert rb.location_type == LocationType.POINT_ALONG_LINE
flrp = _parse_first_lrp(rb)
llrp, pofff, _ = _parse_last_line_lrp(rb, flrp)
poffs = _parse_offset(rb) if pofff else 0
return PointAlongLineLocation(rb.version, rb.location_type, flrp, llrp, poffs)
# GEO_COORDINATES
GeoCoordinateLocation = namedtuple('GeoCoordinateLocation', HEAD_FIELDS+('coords',))
""" Coordinate location type
"""
def parse_geo_coordinates(rb):
""" Parse geo coordinates location
:param _RawBinaryData rb: Binary data describing the location
:returns: Geographic coordinates location
:rtype: GeoCoordinateLocation
"""
assert rb.location_type == LocationType.GEO_COORDINATES
coords = _parse_absolute_coordinates(rb)
return GeoCoordinateLocation(rb.version, rb.location_type, coords)
# POI_WITH_ACCESS_POINT
PoiWithAccessPointLocation = namedtuple('PoiWithAccessPointLocation', HEAD_FIELDS+(
'flrp', 'llrp', 'poffs', 'coords'))
""" Poi with access location type
"""
def parse_poi_with_access_point(rb):
""" Parse POI with access point
:param _RawBinaryData rb: Binary data describing the location
:returns: POI with access point location
:rtype: PoiWithAccessPointLocation
"""
assert rb.location_type == LocationType.POI_WITH_ACCESS_POINT
flrp = _parse_first_lrp(rb)
llrp, pofff, _ = _parse_last_line_lrp(rb, flrp)
poffs = _parse_offset(rb) if pofff else 0
coords = _parse_relative_coordinates(rb, flrp.coords)
return PoiWithAccessPointLocation(rb.version, rb.location_type, flrp, llrp,
poffs, coords)
# CIRCLE
CircleLocation = namedtuple('CircleLocation', HEAD_FIELDS+('coords', 'radius'))
""" Circle Location type
"""
def parse_circle(rb):
""" Parse circle location
:param _RawBinaryData rb: Binary data describing the location
:returns: Circle location
:rtype: CircleLocation
"""
assert rb.location_type == LocationType.CIRCLE
radius_size = rb.num_bytes - CIRCLE_BASE_SIZE
coords = _parse_absolute_coordinates(rb)
radius = _parse_radius(rb, radius_size)
return CircleLocation(rb.version, rb.location_type, coords, radius)
# RECTANGLE
BBox = namedtuple('BBox', ('minx', 'miny', 'maxx', 'maxy'))
RectangleLocation = namedtuple('RectangleLocation', HEAD_FIELDS+('bbox',))
""" Rectangle Location type
"""
def parse_rectangle(rb):
""" Parse rectangle location
:param _RawBinaryData rb: Binary data describing the location
:returns: Rectangle location
:rtype: RectangleLocation
"""
assert rb.location_type == LocationType.RECTANGLE
bl = _parse_absolute_coordinates(rb)
if rb.num_bytes == LARGE_RECTANGLE_SIZE:
tr = _parse_absolute_coordinates(rb)
else:
tr = _parse_relative_coordinates(rb, bl)
bbox = BBox(bl.lon, bl.lat, tr.lon, tr.lat)
return RectangleLocation(rb.version, rb.location_type, bbox)
# GRID
GridLocation = namedtuple('GridLocation', HEAD_FIELDS+('bbox', 'cols', 'rows'))
""" Grid Location type
"""
def parse_grid(rb):
""" Parse grid location
:param _RawBinaryData rb: Binary data describing the location
:returns: Grid location
:rtype: GridLocation
"""
assert rb.location_type == LocationType.GRID
bl = _parse_absolute_coordinates(rb)
if rb.num_bytes == LARGE_GRID_SIZE:
tr = _parse_absolute_coordinates(rb)
else:
tr = _parse_relative_coordinates(rb, bl)
bbox = BBox(bl.lon, bl.lat, tr.lon, tr.lat)
cols, rows = _parse_grid_dimensions(rb)
return GridLocation(rb.version, rb.location_type, bbox, cols, rows)
# CLOSED LINE
ClosedLineLocation = namedtuple('ClosedLineLocation', HEAD_FIELDS+('flrp', 'points', 'frc', 'fow', 'bear'))
def parse_closed_line(rb):
""" Parse closed line location
:param _RawBinaryData rb: Binary data describing the location
:returns: Closed line location
:rtype: ClosedLineLocation
"""
assert rb.location_type == LocationType.CLOSED_LINE
# number of intermediates points
num_intermediates = (rb.num_bytes - MIN_BYTES_CLOSED_LINE_LOCATION) / LRP_SIZE
flrp = _parse_first_lrp(rb)
points = []
rel = flrp
for _ in range(num_intermediates):
ilrp = _parse_intermediate_lrp(rb, rel)
points.append(ilrp)
rel = ilrp
frc, fow, bear = _parse_last_closed_line_attrs(rb)
return ClosedLineLocation(rb.version, rb.location_type, flrp, points, frc, fow, bear)
# CLOSED LINE
PolygonLocation = namedtuple('PolygonLocation', HEAD_FIELDS+('points',))
def parse_polygon(rb):
""" Parse polygon location
:param _RawBinaryData rb: Binary data describing the location
:returns: Polygon location
:rtype: PolygonLocation
"""
assert rb.location_type == LocationType.POLYGON
# number of points
# MIN_BYTES_POLYGON include first point and 2 relatives points
num_intermediates = 2 + (rb.num_bytes - MIN_BYTES_POLYGON) / RELATIVE_COORD_SIZE
points = []
rel = _parse_absolute_coordinates(rb)
points.append(rel)
for _ in range(num_intermediates):
ilrp = _parse_relative_coordinates(rb, rel)
points.append(ilrp)
rel = ilrp
return PolygonLocation(rb.version, rb.location_type, points)
| apache-2.0 |
ArchiveLabs/dweb_gateway | python/Multihash.py | 1 | 5374 | """
A set of classes to hold different kinds of hashes etc and convert between them,
Much of this was adapted from https://github.com/tehmaze/python-multihash,
which seems to have evolved from the pip3 multihash, which is seriously broken.
"""
import hashlib
import struct
import sha3
import pyblake2
import base58
import binascii
import logging
from sys import version as python_version
if python_version.startswith('3'):
from urllib.parse import urlparse
else:
from urlparse import urlparse # See https://docs.python.org/2/library/urlparse.html
from .Errors import MultihashError
class Multihash(object):
"""
Superclass for all kinds of hashes; this is for convenience in passing things
around between places that want binary, multihash or hex.
Core storage is a multihash_binary, i.e. [ code, length, digest... ]
Each instance:
code = SHA1, SHA256 etc (uses integer conventions from multihash)
"""
# Constants
# 0x01..0x0F are app specific (unused)
SHA1 = 0x11
SHA2_256 = 0x12
SHA2_512 = 0x13
SHA3 = 0x14
BLAKE2B = 0x40
BLAKE2S = 0x41
FUNCS = {
SHA1: hashlib.sha1,
SHA2_256: hashlib.sha256,
# Alternative use nacl.hash.sha256(data, encoder=nacl.encoding.RawEncoder) which has different footprint
SHA2_512: hashlib.sha512,
SHA3: lambda: hashlib.new('sha3_512'),
BLAKE2B: lambda: pyblake2.blake2b(),
BLAKE2S: lambda: pyblake2.blake2s(),
}
LENGTHS = {
SHA1: 20,
SHA2_256: 32,
SHA2_512: 64,
SHA3: 64,
BLAKE2B: 64,
BLAKE2S: 32,
}
def assertions(self, code=None):
if code and code != self.code:
raise MultihashError(message="Expecting code {}, got {}".format(code, self.code))
if self.code not in self.FUNCS:
raise MultihashError(message="Unsupported Hash type {}".format(self.code))
if (self.digestlength != len(self.digest)) or (self.digestlength != self.LENGTHS[self.code]):
raise MultihashError(message="Invalid lengths: expect {}, byte {}, len {}"
.format(self.LENGTHS[self.code], self.digestlength, len(self.digest)))
def __init__(self, multihash58=None, sha1hex=None, data=None, code=None, url=None):
"""
Accept variety of parameters,
:param multihash_58:
"""
digest = None
if url: # Assume its of the form somescheme:/somescheme/Q...
logging.debug("url={} {}".format(url.__class__.__name__,url))
if isinstance(url, str) and "/" in url: # https://.../Q...
url = urlparse(url)
if not isinstance(url, str):
multihash58 = url.path.split('/')[-1]
else:
multihash58 = url
if multihash58[0] not in ('5','Q'): # Simplistic check that it looks ok-ish
raise MultihashError(message="Invalid hash portion of URL {}".format(multihash58))
if multihash58:
self._multihash_binary = base58.b58decode(multihash58)
if sha1hex:
if python_version.startswith('2'):
digest = sha1hex.decode('hex') # Python2
else:
digest = bytes.fromhex(sha1hex) # Python3
code = self.SHA1
if data and code:
digest = self._hash(code, data)
if digest and code:
self._multihash_binary = bytearray([code, len(digest)])
self._multihash_binary.extend(digest)
self.assertions() # Check consistency
def _hash(self, code, data):
if not code in self.FUNCS:
raise MultihashError(message="Cant encode hash code={}".format(code))
hashfn = self.FUNCS.get(code)() # Note it calls the function in that strange way hashes work!
if isinstance(data, bytes):
hashfn.update(data)
elif isinstance(data, str):
# In Python 3 this is ok, would be better if we were sure it was utf8
# raise MultihashError(message="Should be passing bytes, not strings as could encode multiple ways") # TODO can remove this if really need to handle UTF8 strings, but better to push conversion upstream
hashfn.update(data.encode('utf-8'))
return hashfn.digest()
def check(self, data):
assert self.digest == self._hash(self.code, data), "Hash doesnt match expected"
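# Illustrative round-trip sketch (assumed usage, not part of the original file):
#
#   m = Multihash(data=b'hello world', code=Multihash.SHA2_256)
#   encoded = m.multihash58                      # base58 string, e.g. 'Qm...'
#   Multihash(multihash58=encoded).check(b'hello world')  # asserts on mismatch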
@property
def code(self):
return self._multihash_binary[0]
@property
def digestlength(self):
return self._multihash_binary[1]
@property
def digest(self):
"""
:return: bytes, the digest part of any multihash
"""
return self._multihash_binary[2:]
@property
def sha1hex(self):
"""
:return: The hex of the sha1 (as used in DOI sqlite tables)
"""
self.assertions(self.SHA1)
return binascii.hexlify(self.digest).decode('utf-8') # The decode is turn bytes b'a1b2' to str 'a1b2'
@property
def multihash58(self):
foo = base58.b58encode(bytes(self._multihash_binary)) # Documentation says returns bytes, Mac returns string, want string
if isinstance(foo,bytes):
return foo.decode('ascii')
else:
return foo | agpl-3.0 |
Fat-Zer/FreeCAD_sf_master | src/Mod/Path/PathScripts/PathArray.py | 4 | 11223 | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2015 Yorik van Havre <[email protected]> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import FreeCADGui
import Path
import PathScripts
from PySide import QtCore
import math
__doc__ = """Path Array object and FreeCAD command"""
# Qt translation handling
def translate(context, text, disambig=None):
return QtCore.QCoreApplication.translate(context, text, disambig)
class ObjectArray:
def __init__(self, obj):
obj.addProperty("App::PropertyLink", "Base",
"Path", "The path to array")
obj.addProperty("App::PropertyEnumeration", "Type",
"Path", QtCore.QT_TRANSLATE_NOOP("App::Property", "Pattern method"))
obj.addProperty("App::PropertyVectorDistance", "Offset",
"Path", "The spacing between the array copies in Linear pattern")
obj.addProperty("App::PropertyInteger", "CopiesX",
"Path", "The number of copies in X direction in Linear pattern")
obj.addProperty("App::PropertyInteger", "CopiesY",
"Path", "The number of copies in Y direction in Linear pattern")
obj.addProperty("App::PropertyAngle", "Angle",
"Path", "Total angle in Polar pattern")
obj.addProperty("App::PropertyInteger", "Copies",
"Path", "The number of copies in Linear 1D and Polar pattern")
obj.addProperty("App::PropertyVector", "Centre",
"Path", "The centre of rotation in Polar pattern")
obj.addProperty("App::PropertyLink", "ToolController",
"Path", QtCore.QT_TRANSLATE_NOOP("App::Property", "The tool controller that will be used to calculate the path"))
obj.Type = ['Linear1D', 'Linear2D', 'Polar']
self.setEditorProperties(obj)
obj.Proxy = self
def __getstate__(self):
return None
def __setstate__(self, state):
return None
def setEditorProperties(self, obj):
if obj.Type == 'Linear2D':
obj.setEditorMode('Angle', 2)
obj.setEditorMode('Copies', 2)
obj.setEditorMode('Centre', 2)
obj.setEditorMode('CopiesX', 0)
obj.setEditorMode('CopiesY', 0)
obj.setEditorMode('Offset', 0)
elif obj.Type == 'Polar':
obj.setEditorMode('Angle', 0)
obj.setEditorMode('Copies', 0)
obj.setEditorMode('Centre', 0)
obj.setEditorMode('CopiesX', 2)
obj.setEditorMode('CopiesY', 2)
obj.setEditorMode('Offset', 2)
elif obj.Type == 'Linear1D':
obj.setEditorMode('Angle', 2)
obj.setEditorMode('Copies', 0)
obj.setEditorMode('Centre', 2)
obj.setEditorMode('CopiesX', 2)
obj.setEditorMode('CopiesY', 2)
obj.setEditorMode('Offset', 0)
def onChanged(self, obj, prop):
if prop == "Type":
self.setEditorProperties(obj)
def rotatePath(self, path, angle, centre):
'''
Rotates a Path around the given centre vector.
Only X and Y are considered.
'''
CmdMoveRapid = ['G0', 'G00']
CmdMoveStraight = ['G1', 'G01']
CmdMoveCW = ['G2', 'G02']
CmdMoveCCW = ['G3', 'G03']
CmdDrill = ['G81', 'G82', 'G83']
CmdMoveArc = CmdMoveCW + CmdMoveCCW
CmdMove = CmdMoveStraight + CmdMoveArc
commands = []
ang = angle / 180 * math.pi
currX = 0
currY = 0
for cmd in path.Commands:
if (cmd.Name in CmdMoveRapid) or (cmd.Name in CmdMove) or (cmd.Name in CmdDrill):
params = cmd.Parameters
x = params.get("X")
if x is None:
x = currX
currX = x
y = params.get("Y")
if y is None:
y = currY
currY = y
# "move" the centre to origin
x = x - centre.x
y = y - centre.y
# rotation around origin:
nx = x * math.cos(ang) - y * math.sin(ang)
ny = y * math.cos(ang) + x * math.sin(ang)
# "move" the centre back and update
params.update({'X': nx + centre.x, 'Y': ny + centre.y})
# Arcs need to have the I and J params rotated as well
if cmd.Name in CmdMoveArc:
i = params.get("I")
if i is None:
i = 0
j = params.get("J")
if j is None:
j = 0
ni = i * math.cos(ang) - j * math.sin(ang)
nj = j * math.cos(ang) + i * math.sin(ang)
params.update({'I': ni, 'J': nj})
cmd.Parameters = params
commands.append(cmd)
newPath = Path.Path(commands)
return newPath
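    # Illustrative sketch (not part of the original module): the planar
    # rotation about a centre that rotatePath applies to the X/Y (and I/J)
    # words, written standalone so the math can be checked without a FreeCAD
    # document. Only the standard math module is assumed.
    @staticmethod
    def _rotatePointExample(x, y, angle, cx, cy):
        '''Rotate point (x, y) by angle degrees around (cx, cy).'''
        ang = angle / 180.0 * math.pi
        dx = x - cx
        dy = y - cy
        nx = dx * math.cos(ang) - dy * math.sin(ang)
        ny = dy * math.cos(ang) + dx * math.sin(ang)
        return nx + cx, ny + cy
    # Example: _rotatePointExample(10, 0, 90, 0, 0) returns approximately
    # (0, 10), matching the X/Y update performed in rotatePath above.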
def execute(self, obj):
if obj.Base:
if not obj.Base.isDerivedFrom("Path::Feature"):
return
if not obj.Base.Path:
return
if not obj.Base.ToolController:
return
obj.ToolController = obj.Base.ToolController
# build copies
basepath = obj.Base.Path
output = ""
if obj.Type == 'Linear1D':
for i in range(obj.Copies):
pl = FreeCAD.Placement()
pos = FreeCAD.Vector(obj.Offset.x * (i + 1), obj.Offset.y * (i + 1), 0)
pl.move(pos)
np = Path.Path([cm.transform(pl)
for cm in basepath.Commands])
output += np.toGCode()
elif obj.Type == 'Linear2D':
for i in range(obj.CopiesX + 1):
for j in range(obj.CopiesY + 1):
pl = FreeCAD.Placement()
# do not process the index 0,0. It will be processed at basepath
if not (i == 0 and j == 0):
if (i % 2) == 0:
pos = FreeCAD.Vector(obj.Offset.x * i, obj.Offset.y * j, 0)
else:
pos = FreeCAD.Vector(obj.Offset.x * i, obj.Offset.y * (obj.CopiesY - j), 0)
pl.move(pos)
np = Path.Path([cm.transform(pl)
for cm in basepath.Commands])
output += np.toGCode()
else:
for i in range(obj.Copies):
ang = 360
if obj.Copies > 0:
ang = obj.Angle / obj.Copies * (1 + i)
np = self.rotatePath(basepath, ang, obj.Centre)
output += np.toGCode()
# print output
path = Path.Path(output)
obj.Path = path
class ViewProviderArray:
def __init__(self, vobj):
self.Object = vobj.Object
vobj.Proxy = self
def attach(self, vobj):
self.Object = vobj.Object
return
def __getstate__(self):
return None
def __setstate__(self, state):
return None
def claimChildren(self):
if hasattr(self, "Object"):
if hasattr(self.Object, "Base"):
if self.Object.Base:
return self.Object.Base
return []
class CommandPathArray:
def GetResources(self):
return {'Pixmap': 'Path_Array',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_Array", "Array"),
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_Array", "Creates an array from a selected path")}
def IsActive(self):
        if not FreeCADGui.Selection.getSelection():
return False
try:
obj = FreeCADGui.Selection.getSelectionEx()[0].Object
return isinstance(obj.Proxy, PathScripts.PathOp.ObjectOp)
        except (IndexError, AttributeError):
return False
def Activated(self):
# check that the selection contains exactly what we want
selection = FreeCADGui.Selection.getSelection()
if len(selection) != 1:
FreeCAD.Console.PrintError(
translate("Path_Array", "Please select exactly one path object")+"\n")
return
        if not selection[0].isDerivedFrom("Path::Feature"):
FreeCAD.Console.PrintError(
translate("Path_Array", "Please select exactly one path object")+"\n")
return
# if everything is ok, execute and register the transaction in the
# undo/redo stack
FreeCAD.ActiveDocument.openTransaction("Create Array")
FreeCADGui.addModule("PathScripts.PathArray")
FreeCADGui.addModule("PathScripts.PathUtils")
FreeCADGui.doCommand(
'obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython","Array")')
FreeCADGui.doCommand('PathScripts.PathArray.ObjectArray(obj)')
FreeCADGui.doCommand(
'obj.Base = (FreeCAD.ActiveDocument.' + selection[0].Name + ')')
# FreeCADGui.doCommand('PathScripts.PathArray.ViewProviderArray(obj.ViewObject)')
FreeCADGui.doCommand('obj.ViewObject.Proxy = 0')
FreeCADGui.doCommand('PathScripts.PathUtils.addToJob(obj)')
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
if FreeCAD.GuiUp:
# register the FreeCAD command
FreeCADGui.addCommand('Path_Array', CommandPathArray())
| lgpl-2.1 |
repotvsupertuga/tvsupertuga.repository | script.module.streamlink.base/resources/lib/streamlink/plugins/atresplayer.py | 4 | 2806 | from __future__ import print_function
import logging
import re
from functools import partial
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream, DASHStream
from streamlink.utils import parse_json, update_scheme, search_dict
log = logging.getLogger(__name__)
class AtresPlayer(Plugin):
    url_re = re.compile(r"https?://(?:www\.)?atresplayer\.com/")
    state_re = re.compile(r"""window\.__PRELOADED_STATE__\s*=\s*({.*?});""", re.DOTALL)
channel_id_schema = validate.Schema(
validate.transform(state_re.search),
validate.any(
None,
validate.all(
validate.get(1),
validate.transform(parse_json),
validate.transform(partial(search_dict, key="href")),
)
)
)
player_api_schema = validate.Schema(
validate.any(
None,
validate.all(
validate.transform(parse_json),
validate.transform(partial(search_dict, key="urlVideo")),
)
)
)
stream_schema = validate.Schema(
validate.transform(parse_json),
{"sources": [
validate.all({
"src": validate.url(),
validate.optional("type"): validate.text
})
]}, validate.get("sources"))
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def __init__(self, url):
# must be HTTPS
super(AtresPlayer, self).__init__(update_scheme("https://", url))
def _get_streams(self):
api_urls = self.session.http.get(self.url, schema=self.channel_id_schema)
_api_url = list(api_urls)[0]
log.debug("API URL: {0}".format(_api_url))
player_api_url = self.session.http.get(_api_url, schema=self.player_api_schema)
for api_url in player_api_url:
log.debug("Player API URL: {0}".format(api_url))
for source in self.session.http.get(api_url, schema=self.stream_schema):
log.debug("Stream source: {0} ({1})".format(source['src'], source.get("type", "n/a")))
if "type" not in source or source["type"] == "application/vnd.apple.mpegurl":
streams = HLSStream.parse_variant_playlist(self.session, source["src"])
if not streams:
yield "live", HLSStream(self.session, source["src"])
else:
for s in streams.items():
yield s
elif source["type"] == "application/dash+xml":
for s in DASHStream.parse_manifest(self.session, source["src"]).items():
yield s
__plugin__ = AtresPlayer
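# Illustrative sketch (not part of the original plugin): can_handle_url() is a
# pure regex check, so it can be exercised without a Streamlink session. The
# URLs below are only examples of matching / non-matching inputs.
def _can_handle_url_example():
    assert AtresPlayer.can_handle_url("https://www.atresplayer.com/directos/antena3/")
    assert not AtresPlayer.can_handle_url("https://example.com/video")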
| gpl-2.0 |
killbug2004/peinjector | pe-injector-interceptor/libPePatch.py | 34 | 4600 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Provides de-serialization and in-stream patch-applying capabilities for PE files.
"""
__author__ = 'A.A.'
# Unpack binary data
from struct import unpack_from
# Holds a single patch part
class PePatchPart(object):
# Constructor
def __init__(self, mem, position, insert):
self.mem = mem
self.next = None
self.position = position
self.insert = insert
self.finished = False
# Deserialize and applies patches on pe files
class PePatch(object):
# Sentinel size
    pepatch_sentinelsize = 9
# First Patch part
first = None
# Constructor
def __init__(self, serialized_memory):
serialized_mem_size = len(serialized_memory)
current_position = 0
current = None
patch = None
# Deserialize data
while (serialized_mem_size - current_position) >= self.pepatch_sentinelsize:
mem_size, position, insert = unpack_from("<II?", serialized_memory, current_position)
# 2*sizeof(uint32_t) + sizeof(uint8_t)
current_position += 9
# Length Error
if (serialized_mem_size - current_position) < mem_size:
return
# Extract Data
patch_data = serialized_memory[current_position:current_position + mem_size]
# Change Position
current_position += mem_size
# Add Patch
if mem_size > 0:
patch = PePatchPart(patch_data, position, insert)
else:
patch = None
# Build chain
if current is not None:
current.next = patch
if self.first is None:
self.first = patch
current = patch
# Length Error
if (serialized_mem_size - current_position) > 0:
self.first = None
return
# Patch is ok
def patch_ok(self):
return self.first is not None
# Apply patch on stream data
def apply_patch(self, mem, position):
all_finished = True
# Nothing to patch
if self.first is None:
return mem
# Apply each patch part
current = self.first
while current is not None:
# Finished, no need to check
if current.finished:
current = current.next
continue
# Patch starts inside memory
if position <= current.position < (position + len(mem)):
delta_position = current.position - position
# Insert
if current.insert:
mem = mem[:delta_position] + current.mem + mem[delta_position:]
# Patch part finished
current.finished = True
# Overwrite
else:
mem = mem[:delta_position] + current.mem[:len(mem)-delta_position] \
+ mem[delta_position+len(current.mem):]
# Patch applied
all_finished = False
# Append after current mem part (important if current part is the last part)
elif current.insert and (current.position == (position + len(mem))):
# Append patch
mem = mem + current.mem
# Patch part finished
current.finished = True
# Patch applied
all_finished = False
# Patch starts before memory
elif (not current.insert) and ((current.position + len(current.mem)) > position)\
and (current.position < position):
delta_position = position - current.position
mem = current.mem[delta_position:delta_position+len(mem)] + mem[len(current.mem)-delta_position:]
# Patch applied
all_finished = False
# Patch finished
elif (current.position + len(current.mem)) < position:
current.finished = True
# Reset total finished
else:
# Patch waiting
all_finished = False
# Next patch part
current = current.next
# Patch finished
if all_finished:
self.first = None
# Return patched memory
return mem
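# Illustrative sketch (not part of the original module): building a patch blob
# that matches the "<II?" layout parsed above (mem_size, position, insert flag,
# followed by mem_size bytes of patch data) and applying it across two stream
# chunks. The payload bytes and offsets are made up for the example.
if __name__ == '__main__':
    from struct import pack
    blob = pack("<II?", 4, 6, False) + b"XXXX"  # overwrite 4 bytes at offset 6
    patch = PePatch(blob)
    if patch.patch_ok():
        data = b"0123456789ABCDEF"
        out = patch.apply_patch(data[:8], 0) + patch.apply_patch(data[8:], 8)
        print(out)  # expected: b"012345XXXXABCDEF"; the overwrite spans both chunks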
| unlicense |
CERNDocumentServer/invenio | modules/webjournal/lib/webjournal_unit_tests.py | 3 | 2784 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for WebJournal."""
__revision__ = \
"$Id$"
# pylint invenio/modules/webjournal/lib/webjournal_tests.py
from invenio.testutils import InvenioTestCase
from invenio.webjournal_utils import compare_issues
from invenio.webjournal import issue_is_later_than
#from invenio import webjournal_utils
from invenio.testutils import make_test_suite, run_test_suite
#from invenio.config import CFG_SITE_URL
class TestCompareIssues(InvenioTestCase):
"""Tests for comparing issues."""
def test_compare_issues(self):
"""webjournal - tests comparing issues"""
issue1 = '06/2009'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), -1)
issue1 = '07/2009'
issue2 = '06/2009'
self.assertEqual(compare_issues(issue1, issue2), 1)
issue1 = '07/2009'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), 0)
issue1 = '07/2009'
issue2 = '07/2008'
self.assertEqual(compare_issues(issue1, issue2), 1)
issue1 = '07/2008'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), -1)
def test_issue1_is_later_than(self):
"""webjournal - tests comparing issue1 is later than issue2 """
issue1 = '07/2009'
issue2 = '07/2008'
self.assertEqual(issue_is_later_than(issue1, issue2), True)
issue1 = '07/2008'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
issue1 = '07/2009'
issue2 = '06/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), True)
issue1 = '06/2009'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
issue1 = '07/2009'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
TEST_SUITE = make_test_suite(TestCompareIssues)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
etxc/namebench | nb_third_party/dns/rdtypes/ANY/CNAME.py | 248 | 1092 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.nsbase
class CNAME(dns.rdtypes.nsbase.NSBase):
"""CNAME record
Note: although CNAME is officially a singleton type, dnspython allows
non-singleton CNAME rdatasets because such sets have been commonly
used by BIND and other nameservers for load balancing."""
pass
| apache-2.0 |
CuriousLearner/standup | src/updates/views.py | 1 | 1310 | from django.shortcuts import render, get_object_or_404
from django.views.generic import DetailView
from authen.models import User
from fraternity.models import Team, Project
from .models import Post, Hashtag
class GetUserPosts(DetailView):
model = Post
template_name = 'posts.html'
context_object_name = 'posts'
def get_object(self):
user = get_object_or_404(User, username=self.kwargs['username'])
return self.model.objects.filter(posted_by=user)
class GetTeamPosts(DetailView):
model = Post
template_name = 'posts.html'
context_object_name = 'posts'
def get_object(self):
team = get_object_or_404(Team, slug=self.kwargs['team'])
return self.model.objects.filter(team=team)
class GetProjectPosts(DetailView):
model = Post
template_name = 'posts.html'
context_object_name = 'posts'
def get_object(self):
project = get_object_or_404(Project, slug=self.kwargs['project'])
return self.model.objects.filter(project=project)
class GetHashtagPosts(DetailView):
model = Post
template_name = 'posts.html'
context_object_name = 'posts'
def get_object(self):
hashtag = get_object_or_404(Hashtag, content=self.kwargs['hashtag'])
return self.model.objects.filter(hashtags=hashtag)
| gpl-3.0 |
zasdfgbnm/tensorflow | tensorflow/contrib/text/python/ops/skip_gram_ops.py | 76 | 21608 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Skip-gram sampling ops from https://arxiv.org/abs/1301.3781."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from tensorflow.contrib import lookup
from tensorflow.contrib.text.python.ops import gen_skip_gram_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import input as input_ops
_checkpoint_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_skip_gram_ops.so"))
ops.NotDifferentiable("SkipGramGenerateCandidates")
def skip_gram_sample(input_tensor,
min_skips=1,
max_skips=5,
start=0,
limit=-1,
emit_self_as_target=False,
vocab_freq_table=None,
vocab_min_count=None,
vocab_subsampling=None,
corpus_size=None,
batch_size=None,
batch_capacity=None,
seed=None,
name=None):
"""Generates skip-gram token and label paired Tensors from the input tensor.
Generates skip-gram `("token", "label")` pairs using each element in the
rank-1 `input_tensor` as a token. The window size used for each token will be
randomly selected from the range specified by `[min_skips, max_skips]`,
inclusive. See https://arxiv.org/abs/1301.3781 for more details about
skip-gram.
For example, given `input_tensor = ["the", "quick", "brown", "fox", "jumps"]`,
`min_skips = 1`, `max_skips = 2`, `emit_self_as_target = False`, the output
`(tokens, labels)` pairs for the token "quick" will be randomly selected from
either `(tokens=["quick", "quick"], labels=["the", "brown"])` for 1 skip, or
`(tokens=["quick", "quick", "quick"], labels=["the", "brown", "fox"])` for 2
skips.
If `emit_self_as_target = True`, each token will also be emitted as a label
for itself. From the previous example, the output will be either
`(tokens=["quick", "quick", "quick"], labels=["the", "quick", "brown"])` for 1
skip, or `(tokens=["quick", "quick", "quick", "quick"], labels=["the",
"quick", "brown", "fox"])` for 2 skips.
The same process is repeated for each element of `input_tensor` and
concatenated together into the two output rank-1 `Tensors` (one for all the
tokens, another for all the labels).
If `vocab_freq_table` is specified, tokens in `input_tensor` that are not
present in the vocabulary are discarded. Tokens whose frequency counts are
below `vocab_min_count` are also discarded. Tokens whose frequency proportions
in the corpus exceed `vocab_subsampling` may be randomly down-sampled. See
Eq. 5 in http://arxiv.org/abs/1310.4546 for more details about subsampling.
Due to the random window sizes used for each token, the lengths of the outputs
are non-deterministic, unless `batch_size` is specified to batch the outputs
to always return `Tensors` of length `batch_size`.
Args:
input_tensor: A rank-1 `Tensor` from which to generate skip-gram candidates.
min_skips: `int` or scalar `Tensor` specifying the minimum window size to
randomly use for each token. Must be >= 0 and <= `max_skips`. If
`min_skips` and `max_skips` are both 0, the only label outputted will be
the token itself when `emit_self_as_target = True` - or no output
otherwise.
max_skips: `int` or scalar `Tensor` specifying the maximum window size to
randomly use for each token. Must be >= 0.
start: `int` or scalar `Tensor` specifying the position in
`input_tensor` from which to start generating skip-gram candidates.
limit: `int` or scalar `Tensor` specifying the maximum number of
elements in `input_tensor` to use in generating skip-gram candidates. -1
means to use the rest of the `Tensor` after `start`.
emit_self_as_target: `bool` or scalar `Tensor` specifying whether to emit
each token as a label for itself.
vocab_freq_table: (Optional) A lookup table (subclass of
`lookup.InitializableLookupTableBase`) that maps tokens to their raw
frequency counts. If specified, any token in `input_tensor` that is not
found in `vocab_freq_table` will be filtered out before generating
skip-gram candidates. While this will typically map to integer raw
frequency counts, it could also map to float frequency proportions.
`vocab_min_count` and `corpus_size` should be in the same units as this.
vocab_min_count: (Optional) `int`, `float`, or scalar `Tensor` specifying
minimum frequency threshold (from `vocab_freq_table`) for a token to be
kept in `input_tensor`. If this is specified, `vocab_freq_table` must also
be specified - and they should both be in the same units.
vocab_subsampling: (Optional) `float` specifying frequency proportion
threshold for tokens from `input_tensor`. Tokens that occur more
frequently (based on the ratio of the token's `vocab_freq_table` value to
the `corpus_size`) will be randomly down-sampled. Reasonable starting
values may be around 1e-3 or 1e-5. If this is specified, both
`vocab_freq_table` and `corpus_size` must also be specified. See Eq. 5
in http://arxiv.org/abs/1310.4546 for more details.
corpus_size: (Optional) `int`, `float`, or scalar `Tensor` specifying the
total number of tokens in the corpus (e.g., sum of all the frequency
counts of `vocab_freq_table`). Used with `vocab_subsampling` for
down-sampling frequently occurring tokens. If this is specified,
`vocab_freq_table` and `vocab_subsampling` must also be specified.
batch_size: (Optional) `int` specifying batch size of returned `Tensors`.
batch_capacity: (Optional) `int` specifying batch capacity for the queue
used for batching returned `Tensors`. Only has an effect if
`batch_size` > 0. Defaults to 100 * `batch_size` if not specified.
seed: (Optional) `int` used to create a random seed for window size and
subsampling. See `set_random_seed` docs for behavior.
name: (Optional) A `string` name or a name scope for the operations.
Returns:
A `tuple` containing (token, label) `Tensors`. Each output `Tensor` is of
rank-1 and has the same type as `input_tensor`. The `Tensors` will be of
length `batch_size`; if `batch_size` is not specified, they will be of
random length, though they will be in sync with each other as long as they
are evaluated together.
Raises:
ValueError: If `vocab_freq_table` is not provided, but `vocab_min_count`,
`vocab_subsampling`, or `corpus_size` is specified. If `vocab_subsampling`
and `corpus_size` are not both present or both absent.
"""
if vocab_freq_table is None and (vocab_min_count is not None or
vocab_subsampling is not None or
corpus_size is not None):
raise ValueError(
"vocab_freq_table is not provided, but vocab_min_count={}, "
"vocab_subsampling={}, or corpus_size={} is not None. These settings "
"are useless without a vocab_freq_table.".format(
vocab_min_count, vocab_subsampling, corpus_size))
if (vocab_subsampling is None) != (corpus_size is None):
raise ValueError(
"vocab_subsampling is {} while corpus_size is {} - both must be "
"provided in order for subsampling to work.".format(
vocab_subsampling, corpus_size))
with ops.name_scope(
name,
"skip_gram_sample",
values=[input_tensor, min_skips, max_skips, start, limit]):
input_tensor = _filter_input(
input_tensor=input_tensor,
vocab_freq_table=vocab_freq_table,
vocab_min_count=vocab_min_count,
vocab_subsampling=vocab_subsampling,
corpus_size=corpus_size,
seed=seed)
seed1, seed2 = random_seed.get_seed(seed)
tokens, labels = gen_skip_gram_ops.skip_gram_generate_candidates(
input_tensor=input_tensor,
min_skips=min_skips,
max_skips=max_skips,
start=start,
limit=limit,
emit_self_as_target=emit_self_as_target,
# Note that seed here should be seed1! This is due to
# GuardedPhiloxRandom's hard-coded attributes of "seed" and "seed2".
seed=seed1,
seed2=seed2)
# TODO(weiho): If the need arises, add support for sparse input_tensor that
# figures out sentence boundaries, then calls
# skip_gram_generate_candidates() on each sentence.
# Batches the (tokens, labels) outputs so that they will be of deterministic
# batch_size, to facilitate feeding them into the rest of the network.
if batch_size is not None and batch_size > 0:
batch_capacity = (batch_capacity
if (batch_capacity is not None and batch_capacity > 0)
else 100 * batch_size)
return input_ops.batch(
[tokens, labels],
batch_size,
capacity=batch_capacity,
enqueue_many=True)
return tokens, labels
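# Illustrative usage sketch (not part of the original module): generating
# skip-gram pairs for a tiny corpus in TF 1.x graph mode. Assumes the contrib
# skip-gram kernels are available; the pairs shown in the comment are only an
# example of possible output.
def _skip_gram_sample_example():
  import tensorflow as tf  # deferred import; TF 1.x session-style execution
  corpus = tf.constant(["the", "quick", "brown", "fox", "jumps"])
  tokens, labels = skip_gram_sample(
      corpus, min_skips=1, max_skips=2, emit_self_as_target=False, seed=42)
  with tf.Session() as sess:
    tok, lab = sess.run([tokens, labels])
  # Each (tok[i], lab[i]) is a (center word, context word) pair, e.g.
  # (b"quick", b"the"), (b"quick", b"brown"), ...
  return tok, lab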
def skip_gram_sample_with_text_vocab(input_tensor,
vocab_freq_file,
vocab_token_index=0,
vocab_token_dtype=dtypes.string,
vocab_freq_index=1,
vocab_freq_dtype=dtypes.float64,
vocab_delimiter=",",
vocab_min_count=0,
vocab_subsampling=None,
corpus_size=None,
min_skips=1,
max_skips=5,
start=0,
limit=-1,
emit_self_as_target=False,
batch_size=None,
batch_capacity=None,
seed=None,
name=None):
"""Skip-gram sampling with a text vocabulary file.
Wrapper around `skip_gram_sample()` for use with a text vocabulary file. The
vocabulary file is expected to be a plain-text file, with lines of
`vocab_delimiter`-separated columns. The `vocab_token_index` column should
contain the vocabulary term, while the `vocab_freq_index` column should
contain the number of times that term occurs in the corpus. For example, with
a text vocabulary file of:
```
bonjour,fr,42
hello,en,777
hola,es,99
```
You should set `vocab_delimiter=","`, `vocab_token_index=0`, and
`vocab_freq_index=2`.
See `skip_gram_sample()` documentation for more details about the skip-gram
sampling process.
Args:
input_tensor: A rank-1 `Tensor` from which to generate skip-gram candidates.
vocab_freq_file: `string` specifying full file path to the text vocab file.
vocab_token_index: `int` specifying which column in the text vocab file
contains the tokens.
vocab_token_dtype: `DType` specifying the format of the tokens in the text
vocab file.
vocab_freq_index: `int` specifying which column in the text vocab file
contains the frequency counts of the tokens.
vocab_freq_dtype: `DType` specifying the format of the frequency counts in
the text vocab file.
vocab_delimiter: `string` specifying the delimiter used in the text vocab
file.
vocab_min_count: `int`, `float`, or scalar `Tensor` specifying
minimum frequency threshold (from `vocab_freq_file`) for a token to be
kept in `input_tensor`. This should correspond with `vocab_freq_dtype`.
vocab_subsampling: (Optional) `float` specifying frequency proportion
threshold for tokens from `input_tensor`. Tokens that occur more
frequently will be randomly down-sampled. Reasonable starting values may
be around 1e-3 or 1e-5. See Eq. 5 in http://arxiv.org/abs/1310.4546 for
more details.
corpus_size: (Optional) `int`, `float`, or scalar `Tensor` specifying the
total number of tokens in the corpus (e.g., sum of all the frequency
counts of `vocab_freq_file`). Used with `vocab_subsampling` for
down-sampling frequently occurring tokens. If this is specified,
`vocab_freq_file` and `vocab_subsampling` must also be specified.
If `corpus_size` is needed but not supplied, then it will be calculated
from `vocab_freq_file`. You might want to supply your own value if you
have already eliminated infrequent tokens from your vocabulary files
(where frequency < vocab_min_count) to save memory in the internal token
lookup table. Otherwise, the unused tokens' variables will waste memory.
The user-supplied `corpus_size` value must be greater than or equal to the
sum of all the frequency counts of `vocab_freq_file`.
min_skips: `int` or scalar `Tensor` specifying the minimum window size to
randomly use for each token. Must be >= 0 and <= `max_skips`. If
`min_skips` and `max_skips` are both 0, the only label outputted will be
the token itself.
max_skips: `int` or scalar `Tensor` specifying the maximum window size to
randomly use for each token. Must be >= 0.
start: `int` or scalar `Tensor` specifying the position in `input_tensor`
from which to start generating skip-gram candidates.
limit: `int` or scalar `Tensor` specifying the maximum number of elements in
`input_tensor` to use in generating skip-gram candidates. -1 means to use
the rest of the `Tensor` after `start`.
emit_self_as_target: `bool` or scalar `Tensor` specifying whether to emit
each token as a label for itself.
batch_size: (Optional) `int` specifying batch size of returned `Tensors`.
batch_capacity: (Optional) `int` specifying batch capacity for the queue
used for batching returned `Tensors`. Only has an effect if
`batch_size` > 0. Defaults to 100 * `batch_size` if not specified.
seed: (Optional) `int` used to create a random seed for window size and
subsampling. See
[`set_random_seed`](../../g3doc/python/constant_op.md#set_random_seed)
for behavior.
name: (Optional) A `string` name or a name scope for the operations.
Returns:
A `tuple` containing (token, label) `Tensors`. Each output `Tensor` is of
rank-1 and has the same type as `input_tensor`. The `Tensors` will be of
length `batch_size`; if `batch_size` is not specified, they will be of
random length, though they will be in sync with each other as long as they
are evaluated together.
Raises:
ValueError: If `vocab_token_index` or `vocab_freq_index` is less than 0 or
exceeds the number of columns in `vocab_freq_file`. If `vocab_token_index`
and `vocab_freq_index` are both set to the same column. If any token in
`vocab_freq_file` has a negative frequency.
"""
if vocab_token_index < 0 or vocab_freq_index < 0:
raise ValueError(
"vocab_token_index={} and vocab_freq_index={} must both be >= 0.".
format(vocab_token_index, vocab_freq_index))
if vocab_token_index == vocab_freq_index:
raise ValueError(
"vocab_token_index and vocab_freq_index should be different, but are "
"both {}.".format(vocab_token_index))
# Iterates through the vocab file and calculates the number of vocab terms as
# well as the total corpus size (by summing the frequency counts of all the
# vocab terms).
calculated_corpus_size = 0.0
vocab_size = 0
with gfile.GFile(vocab_freq_file, mode="r") as f:
reader = csv.reader(f, delimiter=vocab_delimiter)
for row in reader:
if vocab_token_index >= len(row) or vocab_freq_index >= len(row):
raise ValueError(
"Row in vocab file only has {} columns, so vocab_token_index={} or "
"vocab_freq_index={} is out of bounds. Row content: {}".format(
len(row), vocab_token_index, vocab_freq_index, row))
vocab_size += 1
freq = vocab_freq_dtype.as_numpy_dtype(row[vocab_freq_index])
if freq < 0:
raise ValueError(
"Row in vocab file has negative frequency of {}. Row content: {}".
format(freq, row))
# Note: tokens whose frequencies are below vocab_min_count will still
# contribute to the total corpus size used for vocab subsampling.
calculated_corpus_size += freq
if not corpus_size:
corpus_size = calculated_corpus_size
elif calculated_corpus_size - corpus_size > 1e-6:
raise ValueError(
"`corpus_size`={} must be greater than or equal to the sum of all the "
"frequency counts ({}) of `vocab_freq_file` ({}).".format(
corpus_size, calculated_corpus_size, vocab_freq_file))
vocab_freq_table = lookup.HashTable(
lookup.TextFileInitializer(
filename=vocab_freq_file,
key_dtype=vocab_token_dtype,
key_index=vocab_token_index,
value_dtype=vocab_freq_dtype,
value_index=vocab_freq_index,
vocab_size=vocab_size,
delimiter=vocab_delimiter),
# For vocab terms not in vocab file, use a default value of -1.
default_value=-1)
return skip_gram_sample(
input_tensor,
min_skips=min_skips,
max_skips=max_skips,
start=start,
limit=limit,
emit_self_as_target=emit_self_as_target,
vocab_freq_table=vocab_freq_table,
vocab_min_count=vocab_min_count,
vocab_subsampling=vocab_subsampling,
# corpus_size is not used unless vocab_subsampling is specified.
corpus_size=None if vocab_subsampling is None else corpus_size,
batch_size=batch_size,
batch_capacity=batch_capacity,
seed=seed,
name=name)
def _filter_input(input_tensor, vocab_freq_table, vocab_min_count,
vocab_subsampling, corpus_size, seed):
"""Filters input tensor based on vocab freq, threshold, and subsampling."""
if vocab_freq_table is None:
return input_tensor
if not isinstance(vocab_freq_table, lookup.InitializableLookupTableBase):
raise ValueError(
"vocab_freq_table must be a subclass of "
"InitializableLookupTableBase (such as HashTable) instead of type "
"{}.".format(type(vocab_freq_table)))
with ops.name_scope(
"filter_vocab", values=[vocab_freq_table, input_tensor, vocab_min_count]):
freq = vocab_freq_table.lookup(input_tensor)
# Filters out elements in input_tensor that are not found in
# vocab_freq_table (table returns a default value of -1 specified above when
# an element is not found).
mask = math_ops.not_equal(freq, vocab_freq_table.default_value)
# Filters out elements whose vocab frequencies are less than the threshold.
if vocab_min_count is not None:
cast_threshold = math_ops.cast(vocab_min_count, freq.dtype)
mask = math_ops.logical_and(mask,
math_ops.greater_equal(freq, cast_threshold))
input_tensor = array_ops.boolean_mask(input_tensor, mask)
freq = array_ops.boolean_mask(freq, mask)
if not vocab_subsampling:
return input_tensor
if vocab_subsampling < 0 or vocab_subsampling > 1:
raise ValueError(
"Invalid vocab_subsampling={} - it should be within range [0, 1].".
format(vocab_subsampling))
# Subsamples the input tokens based on vocabulary frequency and
# vocab_subsampling threshold (ie randomly discard commonly appearing
# tokens).
with ops.name_scope(
"subsample_vocab", values=[input_tensor, freq, vocab_subsampling]):
corpus_size = math_ops.cast(corpus_size, dtypes.float64)
freq = math_ops.cast(freq, dtypes.float64)
vocab_subsampling = math_ops.cast(vocab_subsampling, dtypes.float64)
# From tensorflow_models/tutorials/embedding/word2vec_kernels.cc, which is
    # supposed to correlate with Eq. 5 in http://arxiv.org/abs/1310.4546.
keep_prob = ((math_ops.sqrt(freq /
(vocab_subsampling * corpus_size)) + 1.0) *
(vocab_subsampling * corpus_size / freq))
random_prob = random_ops.random_uniform(
array_ops.shape(freq),
minval=0,
maxval=1,
dtype=dtypes.float64,
seed=seed)
mask = math_ops.less_equal(random_prob, keep_prob)
return array_ops.boolean_mask(input_tensor, mask)
| apache-2.0 |
epfl-idevelop/jahia2wp | src/parser/sitemap_node.py | 1 | 1306 | from anytree import Node, RenderTree
class SitemapNode(Node):
"""
A SitemapNode represents a node of the sitemap. The root node
(the homepage) is available as a property of the Site class,
e.g. site.sitemaps["en"] for the English sitemap. This class
is an extension of Node, from the anytree library:
https://pypi.python.org/pypi/anytree/1.0.1
A SitemapNode can reference two types of pages:
1. Internal pages, in which case the "page" property is the Page itself and the
"ref" property is the Page's UUID.
2. External pages, in which case the "page" property is None and the
"ref" property is the external URL, e.g. https://www.google.com.
"""
def __init__(self, name, page, ref, parent=None):
super().__init__(name, parent)
self.page = page
self.ref = ref
def print_node(self):
"""Print the node"""
for pre, fill, node in RenderTree(self):
print("%s%s" % (pre, node.name))
@classmethod
def from_navigation_page(cls, navigation_page, parent):
"""Create a SitemapNode from a NavigationPage"""
return SitemapNode(
name=navigation_page.title,
page=navigation_page.page,
ref=navigation_page.ref,
parent=parent)
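# Illustrative sketch (not part of the original module): building a tiny
# sitemap by hand using external-page nodes (page=None, ref=URL) and printing
# the resulting tree. The URLs are placeholders.
def _sitemap_node_example():
    root = SitemapNode(name="Home", page=None, ref="https://example.org")
    SitemapNode(name="About", page=None, ref="https://example.org/about", parent=root)
    SitemapNode(name="Contact", page=None, ref="https://example.org/contact", parent=root)
    root.print_node()  # renders the tree via anytree's RenderTree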
| mit |
Vimos/scikit-learn | sklearn/preprocessing/_function_transformer.py | 41 | 3475 | from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
def _identity(X):
"""The identity function.
"""
return X
class FunctionTransformer(BaseEstimator, TransformerMixin):
"""Constructs a transformer from an arbitrary callable.
A FunctionTransformer forwards its X (and optionally y) arguments to a
user-defined function or function object and returns the result of this
function. This is useful for stateless transformations such as taking the
log of frequencies, doing custom scaling, etc.
A FunctionTransformer will not do any checks on its function's output.
Note: If a lambda is used as the function, then the resulting
transformer will not be pickleable.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <function_transformer>`.
Parameters
----------
func : callable, optional default=None
The callable to use for the transformation. This will be passed
the same arguments as transform, with args and kwargs forwarded.
If func is None, then func will be the identity function.
inverse_func : callable, optional default=None
The callable to use for the inverse transformation. This will be
passed the same arguments as inverse transform, with args and
kwargs forwarded. If inverse_func is None, then inverse_func
will be the identity function.
validate : bool, optional default=True
Indicate that the input X array should be checked before calling
func. If validate is false, there will be no input validation.
If it is true, then X will be converted to a 2-dimensional NumPy
array or sparse matrix. If this conversion is not possible or X
contains NaN or infinity, an exception is raised.
accept_sparse : boolean, optional
Indicate that func accepts a sparse matrix as input. If validate is
False, this has no effect. Otherwise, if accept_sparse is false,
sparse matrix inputs will cause an exception to be raised.
pass_y : bool, optional default=False
Indicate that transform should forward the y argument to the
inner callable.
kw_args : dict, optional
Dictionary of additional keyword arguments to pass to func.
inv_kw_args : dict, optional
Dictionary of additional keyword arguments to pass to inverse_func.
"""
def __init__(self, func=None, inverse_func=None, validate=True,
accept_sparse=False, pass_y=False,
kw_args=None, inv_kw_args=None):
self.func = func
self.inverse_func = inverse_func
self.validate = validate
self.accept_sparse = accept_sparse
self.pass_y = pass_y
self.kw_args = kw_args
self.inv_kw_args = inv_kw_args
def fit(self, X, y=None):
if self.validate:
check_array(X, self.accept_sparse)
return self
def transform(self, X, y=None):
return self._transform(X, y, self.func, self.kw_args)
def inverse_transform(self, X, y=None):
return self._transform(X, y, self.inverse_func, self.inv_kw_args)
def _transform(self, X, y=None, func=None, kw_args=None):
if self.validate:
X = check_array(X, self.accept_sparse)
if func is None:
func = _identity
return func(X, *((y,) if self.pass_y else ()),
**(kw_args if kw_args else {}))
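# Illustrative usage sketch (not part of the original module): a stateless
# log transform, the kind of use case the class docstring mentions. Assumes
# numpy is available.
def _function_transformer_example():
    import numpy as np
    transformer = FunctionTransformer(np.log1p)
    X = np.array([[0., 1.], [2., 3.]])
    return transformer.fit(X).transform(X)  # element-wise log(1 + x)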
| bsd-3-clause |
lfcnassif/MultiContentViewer | release/modules/ext/libreoffice/program/python-core-3.3.0/lib/email/_policybase.py | 94 | 14327 | """Policy framework for the email package.
Allows fine grained feature control of how the package parses and emits data.
"""
import abc
from email import header
from email import charset as _charset
from email.utils import _has_surrogates
__all__ = [
'Policy',
'Compat32',
'compat32',
]
class _PolicyBase:
"""Policy Object basic framework.
This class is useless unless subclassed. A subclass should define
class attributes with defaults for any values that are to be
managed by the Policy object. The constructor will then allow
non-default values to be set for these attributes at instance
creation time. The instance will be callable, taking these same
attributes keyword arguments, and returning a new instance
identical to the called instance except for those values changed
by the keyword arguments. Instances may be added, yielding new
instances with any non-default values from the right hand
operand overriding those in the left hand operand. That is,
A + B == A(<non-default values of B>)
The repr of an instance can be used to reconstruct the object
if and only if the repr of the values can be used to reconstruct
those values.
"""
def __init__(self, **kw):
"""Create new Policy, possibly overriding some defaults.
See class docstring for a list of overridable attributes.
"""
for name, value in kw.items():
if hasattr(self, name):
super(_PolicyBase,self).__setattr__(name, value)
else:
raise TypeError(
"{!r} is an invalid keyword argument for {}".format(
name, self.__class__.__name__))
def __repr__(self):
args = [ "{}={!r}".format(name, value)
for name, value in self.__dict__.items() ]
return "{}({})".format(self.__class__.__name__, ', '.join(args))
def clone(self, **kw):
"""Return a new instance with specified attributes changed.
The new instance has the same attribute values as the current object,
except for the changes passed in as keyword arguments.
"""
newpolicy = self.__class__.__new__(self.__class__)
for attr, value in self.__dict__.items():
object.__setattr__(newpolicy, attr, value)
for attr, value in kw.items():
if not hasattr(self, attr):
raise TypeError(
"{!r} is an invalid keyword argument for {}".format(
attr, self.__class__.__name__))
object.__setattr__(newpolicy, attr, value)
return newpolicy
def __setattr__(self, name, value):
if hasattr(self, name):
msg = "{!r} object attribute {!r} is read-only"
else:
msg = "{!r} object has no attribute {!r}"
raise AttributeError(msg.format(self.__class__.__name__, name))
def __add__(self, other):
"""Non-default values from right operand override those from left.
The object returned is a new instance of the subclass.
"""
return self.clone(**other.__dict__)
def _append_doc(doc, added_doc):
doc = doc.rsplit('\n', 1)[0]
added_doc = added_doc.split('\n', 1)[1]
return doc + '\n' + added_doc
def _extend_docstrings(cls):
if cls.__doc__ and cls.__doc__.startswith('+'):
cls.__doc__ = _append_doc(cls.__bases__[0].__doc__, cls.__doc__)
for name, attr in cls.__dict__.items():
if attr.__doc__ and attr.__doc__.startswith('+'):
for c in (c for base in cls.__bases__ for c in base.mro()):
doc = getattr(getattr(c, name), '__doc__')
if doc:
attr.__doc__ = _append_doc(doc, attr.__doc__)
break
return cls
class Policy(_PolicyBase, metaclass=abc.ABCMeta):
r"""Controls for how messages are interpreted and formatted.
Most of the classes and many of the methods in the email package accept
Policy objects as parameters. A Policy object contains a set of values and
functions that control how input is interpreted and how output is rendered.
For example, the parameter 'raise_on_defect' controls whether or not an RFC
violation results in an error being raised or not, while 'max_line_length'
controls the maximum length of output lines when a Message is serialized.
Any valid attribute may be overridden when a Policy is created by passing
it as a keyword argument to the constructor. Policy objects are immutable,
but a new Policy object can be created with only certain values changed by
calling the Policy instance with keyword arguments. Policy objects can
also be added, producing a new Policy object in which the non-default
attributes set in the right hand operand overwrite those specified in the
left operand.
Settable attributes:
raise_on_defect -- If true, then defects should be raised as errors.
Default: False.
linesep -- string containing the value to use as separation
between output lines. Default '\n'.
cte_type -- Type of allowed content transfer encodings
7bit -- ASCII only
8bit -- Content-Transfer-Encoding: 8bit is allowed
Default: 8bit. Also controls the disposition of
(RFC invalid) binary data in headers; see the
documentation of the binary_fold method.
max_line_length -- maximum length of lines, excluding 'linesep',
during serialization. None or 0 means no line
wrapping is done. Default is 78.
"""
raise_on_defect = False
linesep = '\n'
cte_type = '8bit'
max_line_length = 78
def handle_defect(self, obj, defect):
"""Based on policy, either raise defect or call register_defect.
handle_defect(obj, defect)
defect should be a Defect subclass, but in any case must be an
Exception subclass. obj is the object on which the defect should be
registered if it is not raised. If the raise_on_defect is True, the
defect is raised as an error, otherwise the object and the defect are
passed to register_defect.
This method is intended to be called by parsers that discover defects.
The email package parsers always call it with Defect instances.
"""
if self.raise_on_defect:
raise defect
self.register_defect(obj, defect)
def register_defect(self, obj, defect):
"""Record 'defect' on 'obj'.
Called by handle_defect if raise_on_defect is False. This method is
part of the Policy API so that Policy subclasses can implement custom
defect handling. The default implementation calls the append method of
the defects attribute of obj. The objects used by the email package by
default that get passed to this method will always have a defects
attribute with an append method.
"""
obj.defects.append(defect)
def header_max_count(self, name):
"""Return the maximum allowed number of headers named 'name'.
Called when a header is added to a Message object. If the returned
value is not 0 or None, and there are already a number of headers with
the name 'name' equal to the value returned, a ValueError is raised.
Because the default behavior of Message's __setitem__ is to append the
value to the list of headers, it is easy to create duplicate headers
without realizing it. This method allows certain headers to be limited
in the number of instances of that header that may be added to a
Message programmatically. (The limit is not observed by the parser,
which will faithfully produce as many headers as exist in the message
being parsed.)
The default implementation returns None for all header names.
"""
return None
@abc.abstractmethod
def header_source_parse(self, sourcelines):
"""Given a list of linesep terminated strings constituting the lines of
a single header, return the (name, value) tuple that should be stored
in the model. The input lines should retain their terminating linesep
characters. The lines passed in by the email package may contain
surrogateescaped binary data.
"""
raise NotImplementedError
@abc.abstractmethod
def header_store_parse(self, name, value):
"""Given the header name and the value provided by the application
program, return the (name, value) that should be stored in the model.
"""
raise NotImplementedError
@abc.abstractmethod
def header_fetch_parse(self, name, value):
"""Given the header name and the value from the model, return the value
to be returned to the application program that is requesting that
header. The value passed in by the email package may contain
surrogateescaped binary data if the lines were parsed by a BytesParser.
The returned value should not contain any surrogateescaped data.
"""
raise NotImplementedError
@abc.abstractmethod
def fold(self, name, value):
"""Given the header name and the value from the model, return a string
containing linesep characters that implement the folding of the header
according to the policy controls. The value passed in by the email
package may contain surrogateescaped binary data if the lines were
parsed by a BytesParser. The returned value should not contain any
surrogateescaped data.
"""
raise NotImplementedError
@abc.abstractmethod
def fold_binary(self, name, value):
"""Given the header name and the value from the model, return binary
data containing linesep characters that implement the folding of the
header according to the policy controls. The value passed in by the
email package may contain surrogateescaped binary data.
"""
raise NotImplementedError
@_extend_docstrings
class Compat32(Policy):
"""+
This particular policy is the backward compatibility Policy. It
replicates the behavior of the email package version 5.1.
"""
def _sanitize_header(self, name, value):
# If the header value contains surrogates, return a Header using
# the unknown-8bit charset to encode the bytes as encoded words.
if not isinstance(value, str):
# Assume it is already a header object
return value
if _has_surrogates(value):
return header.Header(value, charset=_charset.UNKNOWN8BIT,
header_name=name)
else:
return value
def header_source_parse(self, sourcelines):
"""+
The name is parsed as everything up to the ':' and returned unmodified.
The value is determined by stripping leading whitespace off the
remainder of the first line, joining all subsequent lines together, and
stripping any trailing carriage return or linefeed characters.
"""
name, value = sourcelines[0].split(':', 1)
value = value.lstrip(' \t') + ''.join(sourcelines[1:])
return (name, value.rstrip('\r\n'))
def header_store_parse(self, name, value):
"""+
The name and value are returned unmodified.
"""
return (name, value)
def header_fetch_parse(self, name, value):
"""+
If the value contains binary data, it is converted into a Header object
using the unknown-8bit charset. Otherwise it is returned unmodified.
"""
return self._sanitize_header(name, value)
def fold(self, name, value):
"""+
Headers are folded using the Header folding algorithm, which preserves
existing line breaks in the value, and wraps each resulting line to the
max_line_length. Non-ASCII binary data are CTE encoded using the
unknown-8bit charset.
"""
return self._fold(name, value, sanitize=True)
def fold_binary(self, name, value):
"""+
Headers are folded using the Header folding algorithm, which preserves
existing line breaks in the value, and wraps each resulting line to the
max_line_length. If cte_type is 7bit, non-ascii binary data is CTE
encoded using the unknown-8bit charset. Otherwise the original source
header is used, with its existing line breaks and/or binary data.
"""
folded = self._fold(name, value, sanitize=self.cte_type=='7bit')
return folded.encode('ascii', 'surrogateescape')
def _fold(self, name, value, sanitize):
parts = []
parts.append('%s: ' % name)
if isinstance(value, str):
if _has_surrogates(value):
if sanitize:
h = header.Header(value,
charset=_charset.UNKNOWN8BIT,
header_name=name)
else:
# If we have raw 8bit data in a byte string, we have no idea
# what the encoding is. There is no safe way to split this
# string. If it's ascii-subset, then we could do a normal
# ascii split, but if it's multibyte then we could break the
# string. There's no way to know so the least harm seems to
# be to not split the string and risk it being too long.
parts.append(value)
h = None
else:
h = header.Header(value, header_name=name)
else:
# Assume it is a Header-like object.
h = value
if h is not None:
parts.append(h.encode(linesep=self.linesep,
maxlinelen=self.max_line_length))
parts.append(self.linesep)
return ''.join(parts)
compat32 = Compat32()
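# Illustrative sketch (not part of the original module): deriving new policies
# from compat32 without mutating it, using clone() and the '+' operator
# described in _PolicyBase's docstring.
def _policy_example():
    crlf_policy = compat32.clone(linesep='\r\n', max_line_length=60)
    strict_policy = crlf_policy + compat32.clone(raise_on_defect=True)
    # strict_policy keeps linesep and max_line_length from the left operand,
    # with raise_on_defect=True taken from the right operand.
    return strict_policy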
| lgpl-3.0 |
MSFTOSSMgmt/WPSDSCLinux | Providers/Scripts/3.x/Scripts/nxFileInventory.py | 2 | 14839 | #!/usr/bin/env python
# ====================================
# Copyright (c) Microsoft Corporation.
# All rights reserved.
# See license.txt for license information.
# ====================================
from __future__ import print_function
from __future__ import with_statement
from contextlib import contextmanager
import os
import pwd
import grp
import codecs
import fnmatch
import copy
import imp
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
try:
import hashlib
md5const = hashlib.md5
shaconst = hashlib.sha256
except ImportError: # Only sha-1 is available for python2.4.
import md5
md5const = md5.md5
import sha
shaconst = sha.sha
# [ClassVersion("1.0.0"), Description("The configuration provider for files and directories."), FriendlyName("nxFileInventory")]
# class MSFT_nxFileInventoryResource:OMI_BaseResource
# {
# [Key, InventoryFilter] string DestinationPath;
# [Write, InventoryFilter] boolean Recurse; //default = false
# [Write, InventoryFilter] boolean UseSudo; //default = false
# [Write, ValueMap{"follow", "manage", "ignore" }, Values{"follow", "manage", "ignore"},InventoryFilter] string Links; //default follow
# [Write, ValueMap{"md5", "sha-256", "mtime", "ctime"}, Values{"md5","sha-256","mtime","ctime"},InventoryFilter] string Checksum; //default md5
# [Write, ValueMap{"file", "directory", "*"},Values{"file", "directory","*"}, InventoryFilter] string Type; //default *
# [Write, InventoryFilter] uint32 MaxContentsReturnable; //default 1024 bytes
# [Write, InventoryFilter] uint64 MaxOutputSize; //default 10485760 bytes
# [Read] string Contents;
# [Read] datetime ModifiedDate;
# [Read] datetime CreatedDate;
# [Read] string Mode;
# [Read] string Group;
# [Read] string Owner;
# [Read] uint64 FileSize;
# };
#{'Links': u'ignore', 'MaxOutputSize': None, 'Checksum': u'md5', 'Recurse': None, 'MaxContentsReturnable': None, 'DestinationPath': u'/tmp', 'Type': u'directory'}
def init_locals(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
if DestinationPath is None :
DestinationPath = ''
if Recurse is None :
Recurse = False
if UseSudo is None :
UseSudo = False
if Links is None :
Links = 'follow'
if Checksum is None :
Checksum = 'md5'
if Type is None :
Type = '*'
if MaxContentsReturnable is None or MaxContentsReturnable < 0:
MaxContentsReturnable = 1024
if MaxOutputSize is None or MaxOutputSize < 0:
MaxOutputSize = 10485760
return DestinationPath, Recurse, Links.lower(), \
Checksum.lower(), Type.lower(), \
MaxContentsReturnable, MaxOutputSize, UseSudo
def Set_Marshall(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
return [0]
def Test_Marshall(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
return [0]
def Get_Marshall(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
arg_names = list(locals().keys())
DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo \
= init_locals(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo)
retval = 0
DestinationPath = protocol.MI_String(DestinationPath)
Type = protocol.MI_String(Type)
MaxContentsReturnable = protocol.MI_Uint32(MaxContentsReturnable)
MaxOutputSize = protocol.MI_Uint64(MaxOutputSize)
Recurse = protocol.MI_Boolean(Recurse)
UseSudo = protocol.MI_Boolean(UseSudo)
Links = protocol.MI_String(Links)
Checksum = protocol.MI_String(Checksum)
Contents = protocol.MI_String('')
ModifiedDate = protocol.MI_Timestamp.from_time(0)
CreatedDate = protocol.MI_Timestamp.from_time(0)
Mode = protocol.MI_String('')
Group = protocol.MI_String('')
Owner = protocol.MI_String('')
FileSize = protocol.MI_Uint64(0)
arg_names.extend(['Contents', 'ModifiedDate', 'CreatedDate', 'Mode', 'Group', 'Owner', 'FileSize'])
retd = {}
ld = locals()
for k in arg_names :
retd[k] = ld[k]
return retval, retd
def Inventory_Marshall(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo \
= init_locals(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo)
retval = 0
out_size_cur = 158 # xml output header + footer length.
xml_overhead_array_element = 99 # xml output overhead per Inventory array entry.
xml_overhead_param = 102 # xml output overhead per Inventory parameter.
_Inventory = []
Inventory = DoInventory(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo)
for d in Inventory:
if out_size_cur < MaxOutputSize:
out_size_cur += xml_overhead_array_element
for k,v in d.items():
out_size_cur += xml_overhead_param
if 'Date' in k:
                    out_size_cur += len(k) + 25 + 3 # The final date format will be 25 chars, +3 for type tag.
else:
out_size_cur += len(k) + len(repr(v)) -2 # The repr(v) will add two quotes.
if out_size_cur >= MaxOutputSize:
break
d['DestinationPath'] = protocol.MI_String(d['DestinationPath'])
d['Checksum'] = protocol.MI_String(d['Checksum'])
d['Type'] = protocol.MI_String(d['Type'])
d['Contents'] = protocol.MI_String(str(MaxContentsReturnable))
d['ModifiedDate'] = protocol.MI_Timestamp.from_time(d['ModifiedDate'])
d['CreatedDate'] = protocol.MI_Timestamp.from_time(d['CreatedDate'])
d['Mode'] = protocol.MI_String(d['Mode'])
d['Group'] = protocol.MI_String(d['Group'])
d['Owner'] = protocol.MI_String(d['Owner'])
d['FileSize'] = protocol.MI_Uint64(d['FileSize'])
_Inventory.append(d)
_Inventory = protocol.MI_InstanceA(_Inventory)
retd = {}
retd["__Inventory"] = _Inventory
return retval, retd
def DoInventory(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
Inventory = []
full_path = DestinationPath.split('/')
if full_path[-1] == '':
full_path[-1] = '*'
wildcard_path = False
for p in full_path:
if '*' in p or '?' in p:
wildc_start=full_path.index(p)
wildcard_path = True
break
if wildcard_path:
top = '/' + os.path.join(*full_path[:wildc_start])
else :
top = '/' + os.path.join(*full_path)
if not os.path.exists(top):
print("Error: Unable to read 'DestinationPath': " + DestinationPath)
LG().Log("ERROR","Unable to read 'DestinationPath': " + DestinationPath)
return Inventory
if not wildcard_path:
if Links == 'ignore' and os.path.islink(top):
return Inventory
    if Type != 'directory' and os.path.isfile(top): # This is a single file.
d = GetFileInfo(top, Links, MaxContentsReturnable, Checksum)
if 'DestinationPath' in d.keys():
Inventory.append(copy.deepcopy(d))
return Inventory
if '*' not in full_path[-1] and '?' not in full_path[-1]:
full_path.append('*') # It is a directory without the trailing '/', so add it.
dirs = set()
full_path_len = len(full_path)
for dirpath, dirnames, filenames in os.walk(top, followlinks=(Links == 'follow'), topdown=True):
dlen = len(dirpath.split('/'))
if dirpath.split('/')[-1] == '':
dlen -= 1
if wildcard_path and full_path_len >= dlen+1:
do_wildcard = True
else :
do_wildcard = False
st = os.stat(dirpath)
scandirs = []
if dlen+1 == full_path_len or ( Recurse and dlen >= full_path_len ):
for filename in filenames:
if (dlen+1 == full_path_len or ( Recurse and dlen >= full_path_len )) \
and not fnmatch.fnmatch(filename, full_path[-1]):
continue
if Type != 'directory':
d = GetFileInfo(os.path.join(dirpath, filename),\
Links, MaxContentsReturnable, Checksum)
if 'DestinationPath' in d.keys():
Inventory.append(copy.deepcopy(d))
for dirname in dirnames:
if not ( Recurse and dlen+1 >= full_path_len ):
if ( do_wildcard and not fnmatch.fnmatch(dirname, full_path[dlen]) ) or \
( not Recurse and dlen > full_path_len ):
continue
st = os.stat(os.path.join(dirpath, dirname)) # use Lstat if follow?
dirkey = st.st_dev, st.st_ino
if dirkey not in dirs:
if Recurse or (not Recurse and dlen+1 < full_path_len) :
dirs.add(dirkey)
scandirs.append(dirname)
if Type != 'file' and ( dlen+1 == full_path_len or ( Recurse and dlen >= full_path_len ) ) :
d = GetDirInfo(os.path.join(dirpath, dirname), st, Checksum, Links)
if 'DestinationPath' in d.keys():
Inventory.append(copy.deepcopy(d))
dirnames[:] = scandirs
return Inventory
def GetFileInfo(fname, Links, MaxContentsReturnable, Checksum):
"""
Return a dictionary of info for a file.
If 'Links' == 'follow', no link files will appear here;
those links will be sent to GetDirInfo() as directories.
Therefore only LStatFile is used.
If the file is a link and 'Links' == 'ignore', {} is returned.
"""
fileContentChecksum = "@{{Algoritm={0} Hash={1} Path={2}}}"
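# Shape of the dictionary assembled below (summarised here for clarity; this
# comment is an editorial addition and the values shown are only illustrative):
#   {'DestinationPath': '/tmp/x', 'Type': 'file' | 'link',
#    'Owner': 'root', 'Group': 'root', 'Mode': '644',
#    'ModifiedDate': <int epoch>, 'CreatedDate': <int epoch>,
#    'FileSize': <int bytes>, 'Checksum': '...', 'Contents': '...'}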
d = {}
if fname.endswith("omsadmin.conf"):
return d
if os.path.islink(fname):
d['Type'] = 'link'
else :
d['Type'] = 'file'
if d['Type'] == 'link' and Links == 'ignore':
return {}
stat_info = None
stat_info = LStatFile(fname)
if stat_info == None:
return {}
d['DestinationPath'] = fname
try:
d['Owner'] = pwd.getpwuid(stat_info.st_uid).pw_name
except:
d['Owner'] = str(stat_info.st_uid)
try:
d['Group'] = grp.getgrgid(stat_info.st_gid).gr_name
except:
d['Group'] = str(stat_info.st_gid)
d['Mode'] = str(oct(stat_info.st_mode))[-3:]
d['ModifiedDate'] = int(stat_info.st_mtime)
d['CreatedDate'] = int(stat_info.st_ctime)
d['FileSize'] = stat_info.st_size
# if file size is 0
# don't attempt to read the file
if stat_info.st_size == 0:
d['Contents'] = ''
if Checksum == 'md5' or Checksum == 'sha-256':
d['Checksum'] = ""
elif Checksum == "ctime":
d['Checksum']= str(int(stat_info.st_ctime))
else : # Checksum == "mtime":
d['Checksum']= str(int(stat_info.st_mtime))
return d
if Checksum == 'md5' or Checksum == 'sha-256':
try:
fileHash = GetChecksum(fname,Checksum)
d['Checksum'] = fileContentChecksum.format(Checksum.upper(), fileHash.upper(), fname)
except:
d['Checksum'] = 0
elif Checksum == "ctime":
d['Checksum']= str(int(stat_info.st_ctime))
else : # Checksum == "mtime":
d['Checksum']= str(int(stat_info.st_mtime))
if d['Type'] == 'link' and Links == 'manage' :
d['Contents'] = 'Symlink to ' + os.readlink(fname)
else :
d['Contents'], error = ReadFileLimited(fname,MaxContentsReturnable)
if d['Contents'] is None:
d['Contents'] = ''
return d
def GetDirInfo(dname, stat_info, Checksum, Links):
"""
Return a dictionary of info for a directory.
Only if 'Links' == 'follow' will links be
processed here as directories.
"""
d = {}
if stat_info == None:
return d
d['Type'] = 'directory'
d['DestinationPath'] = dname
try:
d['Owner'] = pwd.getpwuid(stat_info.st_uid).pw_name
except:
d['Owner'] = str(stat_info.st_uid)
try:
d['Group'] = grp.getgrgid(stat_info.st_gid).gr_name
except:
d['Group'] = str(stat_info.st_gid)
if Checksum == 'md5' or Checksum == 'sha-256':
d['Checksum'] = '0'
elif Checksum == "ctime":
d['Checksum']= str(int(stat_info.st_ctime))
else : # Checksum == "mtime":
d['Checksum']= str(int(stat_info.st_mtime))
d['Mode'] = str(oct(stat_info.st_mode))[-3:]
d['ModifiedDate'] = int(stat_info.st_mtime)
d['CreatedDate'] = int(stat_info.st_ctime)
d['FileSize'] = stat_info.st_size
d['Contents'] = ''
if Links == 'manage' and os.path.islink(dname):
d['Contents'] = 'Symlink to ' + os.readlink(dname)
return d
@contextmanager
def opened_w_error(filename, mode="r"):
try:
f = codecs.open(filename, encoding='utf8', mode=mode)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
@contextmanager
def opened_bin_w_error(filename, mode="rb"):
try:
f = open(filename, mode)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
def ReadFileLimited(path, MaxContentsReturnable):
"""
Safely attempt to read a text file,
ensuring the file is always closed at exit.
Read up to MaxContentsReturnable.
"""
d = ''
error = None
with opened_w_error(path) as (F, error):
if not error:
try:
d = F.read(MaxContentsReturnable)
except:
F.close()
return d.encode().decode('ascii','ignore'), error
def LStatFile(path):
"""
LStat the file. Do not follow the symlink.
"""
d = None
try:
d = os.lstat(path)
except:
pass
return d
def StatFile(path):
"""
Stat the file, following the symlink.
"""
d = None
try:
d = os.stat(path)
except:
pass
return d
def GetChecksum(fname, Checksum):
src_error = None
src_block = b'loopme'
if Checksum == "md5":
src_hash = md5const()
else : # sha-256
src_hash = shaconst()
with opened_bin_w_error(fname, 'rb') as (src_file, src_error):
if src_error:
return ""
while src_block :
src_block = src_file.read(8192)
src_hash.update(src_block)
return src_hash.hexdigest()
| mit |
gtaylor/python-colormath | examples/delta_e_matrix.py | 1 | 1329 | # -*- coding: utf-8 -*-
"""
For a massive matrix of colors and color labels you can download
the following two files
# http://lyst-classifiers.s3.amazonaws.com/color/lab-colors.pk
# http://lyst-classifiers.s3.amazonaws.com/color/lab-matrix.pk
lab-colors is a cPickled list of color names and lab-matrix is a
cPickled (n,3) numpy array of LAB values such that row q maps to
index q in the lab color list
"""
import sys
import csv
import bz2
import numpy as np
# Does some sys.path manipulation so we can run examples in-place.
# noinspection PyUnresolvedReferences
import example_config # noqa
from colormath.color_diff_matrix import delta_e_cie2000
from colormath.color_objects import LabColor
# load list of 1000 random colors from the XKCD color chart
if sys.version_info >= (3, 0):
reader = csv.DictReader(bz2.open("lab_matrix.csv.bz2", mode="rt"))
lab_matrix = np.array([list(map(float, row.values())) for row in reader])
else:
reader = csv.DictReader(bz2.BZ2File("lab_matrix.csv.bz2"))
lab_matrix = np.array([map(float, row.values()) for row in reader])
color = LabColor(lab_l=69.34, lab_a=-0.88, lab_b=-52.57)
lab_color_vector = np.array([color.lab_l, color.lab_a, color.lab_b])
delta = delta_e_cie2000(lab_color_vector, lab_matrix)
print("%s is closest to %s" % (color, lab_matrix[np.argmin(delta)]))
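# Illustrative extension (editorial addition, not part of the original
# example; 'nearest_rows' is a newly introduced name): the same delta vector
# can also rank several near matches instead of only the single closest one.
nearest_rows = np.argsort(delta)[:5]
print("5 closest LAB values:\n%s" % lab_matrix[nearest_rows])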
| bsd-3-clause |
tima/ansible | lib/ansible/modules/notification/catapult.py | 49 | 4362 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Jonathan Mainguy <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# basis of code taken from the ansible twilio and nexmo modules
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: catapult
version_added: 2.4
short_description: Send a sms / mms using the catapult bandwidth api
description:
- Allows notifications to be sent using sms / mms via the catapult bandwidth api.
options:
src:
description:
- One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
required: true
default: null
dest:
description:
- The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)).
required: true
default: null
msg:
description:
- The contents of the text message (must be 2048 characters or less).
required: true
default: null
media:
description:
- For MMS messages, a media url to the location of the media to be sent with the message.
user_id:
description:
- User Id from Api account page.
required: true
default: null
api_token:
description:
- Api Token from Api account page.
required: true
default: null
api_secret:
description:
- Api Secret from Api account page.
required: true
default: null
author: "Jonathan Mainguy (@Jmainguy)"
notes:
- Will return changed even if the media url is wrong.
- Will return changed if the destination number is invalid.
'''
EXAMPLES = '''
- name: Send a mms to multiple users
catapult:
src: "+15035555555"
dest:
- "+12525089000"
- "+12018994225"
media: "http://example.com/foobar.jpg"
msg: "Task is complete"
user_id: "{{ user_id }}"
api_token: "{{ api_token }}"
api_secret: "{{ api_secret }}"
- name: Send a sms to a single user
catapult:
src: "+15035555555"
dest: "+12018994225"
msg: "Consider yourself notified"
user_id: "{{ user_id }}"
api_token: "{{ api_token }}"
api_secret: "{{ api_secret }}"
'''
RETURN = '''
changed:
description: Whether the api accepted the message.
returned: always
type: boolean
sample: True
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def send(module, src, dest, msg, media, user_id, api_token, api_secret):
"""
Send the message
"""
AGENT = "Ansible"
URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
data = {'from': src, 'to': dest, 'text': msg}
if media:
data['media'] = media
headers = {'User-Agent': AGENT, 'Content-type': 'application/json'}
# Hack module params to have the Basic auth params that fetch_url expects
module.params['url_username'] = api_token.replace('\n', '')
module.params['url_password'] = api_secret.replace('\n', '')
return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(required=True),
dest=dict(required=True, type='list'),
msg=dict(required=True),
user_id=dict(required=True),
api_token=dict(required=True, no_log=True),
api_secret=dict(required=True, no_log=True),
media=dict(default=None, required=False),
),
)
src = module.params['src']
dest = module.params['dest']
msg = module.params['msg']
media = module.params['media']
user_id = module.params['user_id']
api_token = module.params['api_token']
api_secret = module.params['api_secret']
for number in dest:
rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
if info["status"] != 201:
body = json.loads(info["body"])
fail_msg = body["message"]
module.fail_json(msg=fail_msg)
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| gpl-3.0 |
kingvuplus/ts-gui-3 | lib/python/Components/Harddisk.py | 3 | 28983 | import os
import time
from Tools.CList import CList
from Tools.HardwareInfo import HardwareInfo
from SystemInfo import SystemInfo
from Components.Console import Console
import Task
def readFile(filename):
file = open(filename)
data = file.read().strip()
file.close()
return data
def getProcMounts():
try:
mounts = open("/proc/mounts", 'r')
except IOError, ex:
print "[Harddisk] Failed to open /proc/mounts", ex
return []
result = [line.strip().split(' ') for line in mounts]
for item in result:
# Spaces are encoded as \040 in mounts
item[1] = item[1].replace('\\040', ' ')
return result
def isFileSystemSupported(filesystem):
try:
for fs in open('/proc/filesystems', 'r'):
if fs.strip().endswith(filesystem):
return True
return False
except Exception, ex:
print "[Harddisk] Failed to read /proc/filesystems:", ex
def findMountPoint(path):
'Example: findMountPoint("/media/hdd/some/file") returns "/media/hdd"'
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
DEVTYPE_UDEV = 0
DEVTYPE_DEVFS = 1
class Harddisk:
def __init__(self, device, removable=False):
self.device = device
if os.access("/dev/.udev", 0):
self.type = DEVTYPE_UDEV
elif os.access("/dev/.devfsd", 0):
self.type = DEVTYPE_DEVFS
else:
print "[Harddisk] Unable to determine structure of /dev"
self.card = False
self.max_idle_time = 0
self.idle_running = False
self.last_access = time.time()
self.last_stat = 0
self.timer = None
self.is_sleeping = False
self.dev_path = ''
self.disk_path = ''
self.mount_path = None
self.mount_device = None
self.phys_path = os.path.realpath(self.sysfsPath('device'))
self.removable = removable
self.internal = "pci" in self.phys_path or "ahci" in self.phys_path
try:
data = open("/sys/block/%s/queue/rotational" % device, "r").read().strip()
self.rotational = int(data)
except:
self.rotational = True
if self.type == DEVTYPE_UDEV:
self.dev_path = '/dev/' + self.device
self.disk_path = self.dev_path
self.card = "sdhci" in self.phys_path
elif self.type == DEVTYPE_DEVFS:
tmp = readFile(self.sysfsPath('dev')).split(':')
s_major = int(tmp[0])
s_minor = int(tmp[1])
for disc in os.listdir("/dev/discs"):
dev_path = os.path.realpath('/dev/discs/' + disc)
disk_path = dev_path + '/disc'
try:
rdev = os.stat(disk_path).st_rdev
except OSError:
continue
if s_major == os.major(rdev) and s_minor == os.minor(rdev):
self.dev_path = dev_path
self.disk_path = disk_path
break
self.card = self.device[:2] == "hd" and "host0" not in self.dev_path
print "[Harddisk] new device", self.device, '->', self.dev_path, '->', self.disk_path
if not removable and not self.card:
self.startIdle()
def __lt__(self, ob):
return self.device < ob.device
def partitionPath(self, n):
if self.type == DEVTYPE_UDEV:
if self.dev_path.startswith('/dev/mmcblk0'):
return self.dev_path + "p" + n
else:
return self.dev_path + n
elif self.type == DEVTYPE_DEVFS:
return self.dev_path + '/part' + n
def sysfsPath(self, filename):
return os.path.join('/sys/block/', self.device, filename)
def stop(self):
if self.timer:
self.timer.stop()
self.timer.callback.remove(self.runIdle)
def bus(self):
ret = _("External")
# SD/MMC(F1 specific)
if self.type == DEVTYPE_UDEV:
type_name = " (SD/MMC)"
# CF(7025 specific)
elif self.type == DEVTYPE_DEVFS:
type_name = " (CF)"
if self.card:
ret += type_name
else:
if self.internal:
ret = _("Internal")
if not self.rotational:
ret += " (SSD)"
return ret
def diskSize(self):
cap = 0
try:
line = readFile(self.sysfsPath('size'))
cap = int(line)
return cap / 1000 * 512 / 1000
except:
dev = self.findMount()
if dev:
try:
stat = os.statvfs(dev)
cap = int(stat.f_blocks * stat.f_bsize)
return cap / 1000 / 1000
except:
pass
return cap
def capacity(self):
cap = self.diskSize()
if cap == 0:
return ""
if cap < 1000:
return "%03d MB" % cap
return "%d.%03d GB" % (cap/1000, cap%1000)
def model(self):
try:
if self.device[:2] == "hd":
return readFile('/proc/ide/' + self.device + '/model')
elif self.device[:2] == "sd":
vendor = readFile(self.sysfsPath('device/vendor'))
model = readFile(self.sysfsPath('device/model'))
return vendor + '(' + model + ')'
elif self.device.startswith('mmcblk0'):
return readFile(self.sysfsPath('device/name'))
else:
raise Exception, "[Harddisk] no hdX or sdX or mmcX"
except Exception, e:
print "[Harddisk] Failed to get model:", e
return "-?-"
def free(self):
dev = self.findMount()
if dev:
stat = os.statvfs(dev)
return (stat.f_bfree/1000) * (stat.f_bsize/1024)
return -1
def numPartitions(self):
numPart = -1
if self.type == DEVTYPE_UDEV:
try:
devdir = os.listdir('/dev')
except OSError:
return -1
for filename in devdir:
if filename.startswith(self.device):
numPart += 1
elif self.type == DEVTYPE_DEVFS:
try:
idedir = os.listdir(self.dev_path)
except OSError:
return -1
for filename in idedir:
if filename.startswith("disc"):
numPart += 1
if filename.startswith("part"):
numPart += 1
return numPart
def mountDevice(self):
for parts in getProcMounts():
if os.path.realpath(parts[0]).startswith(self.dev_path):
self.mount_device = parts[0]
self.mount_path = parts[1]
return parts[1]
return None
def enumMountDevices(self):
for parts in getProcMounts():
if os.path.realpath(parts[0]).startswith(self.dev_path):
yield parts[1]
def findMount(self):
if self.mount_path is None:
return self.mountDevice()
return self.mount_path
def unmount(self):
dev = self.mountDevice()
if dev is None:
# not mounted, return OK
return 0
cmd = 'umount ' + dev
print "[Harddisk]", cmd
res = os.system(cmd)
return (res >> 8)
def createPartition(self):
cmd = 'printf "8,\n;0,0\n;0,0\n;0,0\ny\n" | sfdisk -f -uS ' + self.disk_path
res = os.system(cmd)
return (res >> 8)
def mkfs(self):
# No longer supported, use createInitializeJob instead
return 1
def mount(self):
# try mounting through fstab first
if self.mount_device is None:
dev = self.partitionPath("1")
else:
# if previously mounted, use the same spot
dev = self.mount_device
try:
fstab = open("/etc/fstab")
lines = fstab.readlines()
fstab.close()
except IOError:
return -1
for line in lines:
parts = line.strip().split(" ")
fspath = os.path.realpath(parts[0])
if fspath == dev:
print "[Harddisk] mounting:", fspath
cmd = "mount -t auto " + fspath
res = os.system(cmd)
return (res >> 8)
# device is not in fstab
res = -1
if self.type == DEVTYPE_UDEV:
# we can let udev do the job, re-read the partition table
res = os.system('hdparm -z ' + self.disk_path)
# give udev some time to make the mount, which it will do asynchronously
from time import sleep
sleep(3)
return (res >> 8)
def fsck(self):
# No longer supported, use createCheckJob instead
return 1
def killPartitionTable(self):
zero = 512 * '\0'
h = open(self.dev_path, 'wb')
# delete first 9 sectors, which will likely kill the first partition too
for i in range(9):
h.write(zero)
h.close()
def killPartition(self, n):
zero = 512 * '\0'
part = self.partitionPath(n)
h = open(part, 'wb')
for i in range(3):
h.write(zero)
h.close()
def createInitializeJob(self):
job = Task.Job(_("Initializing storage device..."))
size = self.diskSize()
print "[HD] size: %s MB" % size
task = UnmountTask(job, self)
task = Task.PythonTask(job, _("Removing partition table"))
task.work = self.killPartitionTable
task.weighting = 1
task = Task.LoggingTask(job, _("Rereading partition table"))
task.weighting = 1
task.setTool('hdparm')
task.args.append('-z')
task.args.append(self.disk_path)
task = Task.ConditionTask(job, _("Waiting for partition"), timeoutCount=20)
task.check = lambda: not os.path.exists(self.partitionPath("1"))
task.weighting = 1
if os.path.exists('/usr/sbin/parted'):
use_parted = True
else:
if size > 2097151:
addInstallTask(job, 'parted')
use_parted = True
else:
use_parted = False
task = Task.LoggingTask(job, _("Creating partition"))
task.weighting = 5
if use_parted:
task.setTool('parted')
if size < 1024:
# On very small devices, align to block only
alignment = 'min'
else:
# Prefer optimal alignment for performance
alignment = 'opt'
if size > 2097151:
parttype = 'gpt'
else:
parttype = 'msdos'
task.args += ['-a', alignment, '-s', self.disk_path, 'mklabel', parttype, 'mkpart', 'primary', '0%', '100%']
else:
task.setTool('sfdisk')
task.args.append('-f')
task.args.append('-uS')
task.args.append(self.disk_path)
if size > 128000:
# Start at sector 8 to better support 4k aligned disks
print "[HD] Detected >128GB disk, using 4k alignment"
task.initial_input = "8,,L\n;0,0\n;0,0\n;0,0\ny\n"
else:
# Smaller disks (CF cards, sticks etc) don't need that
task.initial_input = ",,L\n;\n;\n;\ny\n"
task = Task.ConditionTask(job, _("Waiting for partition"))
task.check = lambda: os.path.exists(self.partitionPath("1"))
task.weighting = 1
task = MkfsTask(job, _("Creating filesystem"))
big_o_options = ["dir_index"]
if isFileSystemSupported("ext4"):
task.setTool("mkfs.ext4")
if size > 20000:
try:
version = map(int, open("/proc/version","r").read().split(' ', 4)[2].split('.',2)[:2])
if (version[0] > 3) or ((version[0] > 2) and (version[1] >= 2)):
# Linux version 3.2 supports bigalloc and -C option, use 256k blocks
task.args += ["-C", "262144"]
big_o_options.append("bigalloc")
except Exception, ex:
print "Failed to detect Linux version:", ex
else:
task.setTool("mkfs.ext3")
if size > 250000:
# No more than 256k i-nodes (prevent problems with fsck memory requirements)
task.args += ["-T", "largefile", "-N", "262144"]
big_o_options.append("sparse_super")
elif size > 16384:
# between 16GB and 250GB: 1 i-node per megabyte
task.args += ["-T", "largefile"]
big_o_options.append("sparse_super")
elif size > 2048:
# Over 2GB: 32 i-nodes per megabyte
task.args += ["-T", "largefile", "-N", str(size * 32)]
task.args += ["-m0", "-O", ",".join(big_o_options), self.partitionPath("1")]
task = MountTask(job, self)
task.weighting = 3
task = Task.ConditionTask(job, _("Waiting for mount"), timeoutCount=20)
task.check = self.mountDevice
task.weighting = 1
return job
def initialize(self):
# no longer supported
return -5
def check(self):
# no longer supported
return -5
def createCheckJob(self):
job = Task.Job(_("Checking filesystem..."))
if self.findMount():
# Create unmount task if it was not mounted
UnmountTask(job, self)
dev = self.mount_device
else:
# otherwise, assume there is one partition
dev = self.partitionPath("1")
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext3')
task.args.append('-f')
task.args.append('-p')
task.args.append(dev)
MountTask(job, self)
task = Task.ConditionTask(job, _("Waiting for mount"))
task.check = self.mountDevice
return job
def createExt4ConversionJob(self):
if not isFileSystemSupported('ext4'):
raise Exception, _("Your system does not support ext4")
job = Task.Job(_("Converting ext3 to ext4..."))
if not os.path.exists('/sbin/tune2fs'):
addInstallTask(job, 'e2fsprogs-tune2fs')
if self.findMount():
# Create unmount task if it was not mounted
UnmountTask(job, self)
dev = self.mount_device
else:
# otherwise, assume there is one partition
dev = self.partitionPath("1")
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext3')
task.args.append('-p')
task.args.append(dev)
task = Task.LoggingTask(job, "tune2fs")
task.setTool('tune2fs')
task.args.append('-O')
task.args.append('extents,uninit_bg,dir_index')
task.args.append('-o')
task.args.append('journal_data_writeback')
task.args.append(dev)
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext4')
task.postconditions = [] # ignore result, it will always "fail"
task.args.append('-f')
task.args.append('-p')
task.args.append('-D')
task.args.append(dev)
MountTask(job, self)
task = Task.ConditionTask(job, _("Waiting for mount"))
task.check = self.mountDevice
return job
def getDeviceDir(self):
return self.dev_path
def getDeviceName(self):
return self.disk_path
# the HDD idle poll daemon.
# as some harddrives have a buggy standby timer, we are doing this by hand here.
# first, we disable the hardware timer. then, we check every now and then if
# any access has been made to the disc. If there has been no access over a specified time,
# we set the hdd into standby.
def readStats(self):
try:
l = open("/sys/block/%s/stat" % self.device).read()
except IOError:
return -1,-1
data = l.split(None,5)
return (int(data[0]), int(data[4]))
def startIdle(self):
from enigma import eTimer
# disable HDD standby timer
if self.bus() == _("External"):
Console().ePopen(("sdparm", "sdparm", "--set=SCT=0", self.disk_path))
else:
Console().ePopen(("hdparm", "hdparm", "-S0", self.disk_path))
self.timer = eTimer()
self.timer.callback.append(self.runIdle)
self.idle_running = True
self.setIdleTime(self.max_idle_time) # kick the idle polling loop
def runIdle(self):
if not self.max_idle_time:
return
t = time.time()
idle_time = t - self.last_access
stats = self.readStats()
l = sum(stats)
if l != self.last_stat and l >= 0: # access
self.last_stat = l
self.last_access = t
idle_time = 0
self.is_sleeping = False
if idle_time >= self.max_idle_time and not self.is_sleeping:
self.setSleep()
self.is_sleeping = True
def setSleep(self):
if self.bus() == _("External"):
Console().ePopen(("sdparm", "sdparm", "--flexible", "--readonly", "--command=stop", self.disk_path))
else:
Console().ePopen(("hdparm", "hdparm", "-y", self.disk_path))
def setIdleTime(self, idle):
self.max_idle_time = idle
if self.idle_running:
if not idle:
self.timer.stop()
else:
self.timer.start(idle * 100, False) # poll 10 times per period.
def isSleeping(self):
return self.is_sleeping
class Partition:
# for backward compatibility, force_mounted actually means "hotplug"
def __init__(self, mountpoint, device = None, description = "", force_mounted = False):
self.mountpoint = mountpoint
self.description = description
self.force_mounted = mountpoint and force_mounted
self.is_hotplug = force_mounted # so far; this might change.
self.device = device
def __str__(self):
return "Partition(mountpoint=%s,description=%s,device=%s)" % (self.mountpoint,self.description,self.device)
def stat(self):
if self.mountpoint:
return os.statvfs(self.mountpoint)
else:
raise OSError, "Device %s is not mounted" % self.device
def free(self):
try:
s = self.stat()
return s.f_bavail * s.f_bsize
except OSError:
return None
def total(self):
try:
s = self.stat()
return s.f_blocks * s.f_bsize
except OSError:
return None
def tabbedDescription(self):
if self.mountpoint.startswith('/media/net') or self.mountpoint.startswith('/media/autofs'):
# Network devices have a user defined name
return self.description
return self.description + '\t' + self.mountpoint
def mounted(self, mounts = None):
# THANK YOU PYTHON FOR STRIPPING AWAY f_fsid.
# TODO: can os.path.ismount be used?
if self.force_mounted:
return True
if self.mountpoint:
if mounts is None:
mounts = getProcMounts()
for parts in mounts:
if self.mountpoint.startswith(parts[1]): # use startswith so a mount not ending with '/' is also detected.
return True
return False
def filesystem(self, mounts = None):
if self.mountpoint:
if mounts is None:
mounts = getProcMounts()
for fields in mounts:
if self.mountpoint.endswith('/') and not self.mountpoint == '/':
if fields[1] + '/' == self.mountpoint:
return fields[2]
else:
if fields[1] == self.mountpoint:
return fields[2]
return ''
DEVICEDB = \
{"dm8000":
{
# dm8000:
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1.1/1-1.1:1.0": "Front USB Slot",
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1.2/1-1.2:1.0": "Back, upper USB Slot",
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1.3/1-1.3:1.0": "Back, lower USB Slot",
"/devices/platform/brcm-ehci-1.1/usb2/2-1/2-1:1.0/host1/target1:0:0/1:0:0:0": "DVD Drive",
},
"dm800":
{
# dm800:
"/devices/platform/brcm-ehci.0/usb1/1-2/1-2:1.0": "Upper USB Slot",
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1:1.0": "Lower USB Slot",
},
"dm7025":
{
# dm7025:
"/devices/pci0000:00/0000:00:14.1/ide1/1.0": "CF Card Slot", #hdc
"/devices/pci0000:00/0000:00:14.1/ide0/0.0": "Internal Harddisk"
}
}
def addInstallTask(job, package):
task = Task.LoggingTask(job, "update packages")
task.setTool('opkg')
task.args.append('update')
task = Task.LoggingTask(job, "Install " + package)
task.setTool('opkg')
task.args.append('install')
task.args.append(package)
class HarddiskManager:
def __init__(self):
self.hdd = [ ]
self.cd = ""
self.partitions = [ ]
self.devices_scanned_on_init = [ ]
self.on_partition_list_change = CList()
self.enumerateBlockDevices()
# Find stuff not detected by the enumeration
p = (
("/media/hdd", _("Hard disk")),
("/media/card", _("Card")),
("/media/cf", _("Compact flash")),
("/media/mmc1", _("MMC card")),
("/media/net", _("Network mount")),
("/media/net1", _("Network mount %s") % ("1")),
("/media/net2", _("Network mount %s") % ("2")),
("/media/net3", _("Network mount %s") % ("3")),
("/media/ram", _("Ram disk")),
("/media/usb", _("USB stick")),
("/", _("Internal flash"))
)
known = set([os.path.normpath(a.mountpoint) for a in self.partitions if a.mountpoint])
for m,d in p:
if (m not in known) and os.path.ismount(m):
self.partitions.append(Partition(mountpoint=m, description=d))
def getBlockDevInfo(self, blockdev):
devpath = "/sys/block/" + blockdev
error = False
removable = False
blacklisted = False
is_cdrom = False
partitions = []
try:
if os.path.exists(devpath + "/removable"):
removable = bool(int(readFile(devpath + "/removable")))
if os.path.exists(devpath + "/dev"):
dev = int(readFile(devpath + "/dev").split(':')[0])
else:
dev = None
if HardwareInfo().get_device_model().startswith('vusolo4k'):
devlist = [1, 7, 31, 253, 254, 179] # ram, loop, mtdblock, romblock, ramzswap, mmc
else:
devlist = [1, 7, 31, 253, 254] # ram, loop, mtdblock, romblock, ramzswap
if dev in devlist:
blacklisted = True
if blockdev[0:2] == 'sr':
is_cdrom = True
if blockdev[0:2] == 'hd':
try:
media = readFile("/proc/ide/%s/media" % blockdev)
if "cdrom" in media:
is_cdrom = True
except IOError:
error = True
# check for partitions
if not is_cdrom and os.path.exists(devpath):
for partition in os.listdir(devpath):
if partition[0:len(blockdev)] != blockdev:
continue
partitions.append(partition)
else:
self.cd = blockdev
except IOError:
error = True
# check for medium
medium_found = True
try:
open("/dev/" + blockdev).close()
except IOError, err:
if err.errno == 159: # no medium present
medium_found = False
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def enumerateBlockDevices(self):
print "[Harddisk] enumerating block devices..."
for blockdev in os.listdir("/sys/block"):
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.addHotplugPartition(blockdev)
if not error and not blacklisted and medium_found:
for part in partitions:
self.addHotplugPartition(part)
self.devices_scanned_on_init.append((blockdev, removable, is_cdrom, medium_found))
def getAutofsMountpoint(self, device):
r = self.getMountpoint(device)
if r is None:
return "/media/" + device
return r
def getMountpoint(self, device):
dev = "/dev/%s" % device
for item in getProcMounts():
if item[0] == dev:
return item[1]
return None
def addHotplugPartition(self, device, physdev = None):
# device is the device name, without /dev
# physdev is the physical device path, which we (might) use to determine the userfriendly name
if not physdev:
dev, part = self.splitDeviceName(device)
try:
physdev = os.path.realpath('/sys/block/' + dev + '/device')[4:]
except OSError:
physdev = dev
print "couldn't determine blockdev physdev for device", device
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.getBlockDevInfo(device)
if not blacklisted and medium_found:
description = self.getUserfriendlyDeviceName(device, physdev)
p = Partition(mountpoint = self.getMountpoint(device), description = description, force_mounted = True, device = device)
self.partitions.append(p)
if p.mountpoint: # Plugins won't expect unmounted devices
self.on_partition_list_change("add", p)
# see if this is a harddrive
l = len(device)
if l and (not device[l-1].isdigit() or device == 'mmcblk0'):
self.hdd.append(Harddisk(device, removable))
self.hdd.sort()
SystemInfo["Harddisk"] = True
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def addHotplugAudiocd(self, device, physdev = None):
# device is the device name, without /dev
# physdev is the physical device path, which we (might) use to determine the userfriendly name
if not physdev:
dev, part = self.splitDeviceName(device)
try:
physdev = os.path.realpath('/sys/block/' + dev + '/device')[4:]
except OSError:
physdev = dev
print "couldn't determine blockdev physdev for device", device
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.getBlockDevInfo(device)
if not blacklisted and medium_found:
description = self.getUserfriendlyDeviceName(device, physdev)
p = Partition(mountpoint = "/media/audiocd", description = description, force_mounted = True, device = device)
self.partitions.append(p)
self.on_partition_list_change("add", p)
SystemInfo["Harddisk"] = False
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def removeHotplugPartition(self, device):
for x in self.partitions[:]:
if x.device == device:
self.partitions.remove(x)
if x.mountpoint: # Plugins won't expect unmounted devices
self.on_partition_list_change("remove", x)
l = len(device)
if l and not device[l-1].isdigit():
for hdd in self.hdd:
if hdd.device == device:
hdd.stop()
self.hdd.remove(hdd)
break
SystemInfo["Harddisk"] = len(self.hdd) > 0
def HDDCount(self):
return len(self.hdd)
def HDDList(self):
list = [ ]
for hd in self.hdd:
hdd = hd.model() + " - " + hd.bus()
cap = hd.capacity()
if cap != "":
hdd += " (" + cap + ")"
list.append((hdd, hd))
return list
def getCD(self):
return self.cd
def getMountedPartitions(self, onlyhotplug = False, mounts=None):
if mounts is None:
mounts = getProcMounts()
parts = [x for x in self.partitions if (x.is_hotplug or not onlyhotplug) and x.mounted(mounts)]
devs = set([x.device for x in parts])
for devname in devs.copy():
if not devname:
continue
dev, part = self.splitDeviceName(devname)
if part and dev in devs: # if this is a partition and we still have the wholedisk, remove wholedisk
devs.remove(dev)
# return all devices which are not removed due to being a wholedisk when a partition exists
return [x for x in parts if not x.device or x.device in devs]
def splitDeviceName(self, devname):
# this works for: sdaX, hdaX, sr0 (which is in fact dev="sr0", part=""). It doesn't work for other names like mtdblock3, but they are blacklisted anyway.
dev = devname[:3]
part = devname[3:]
for p in part:
if not p.isdigit():
return devname, 0
return dev, part and int(part) or 0
def getUserfriendlyDeviceName(self, dev, phys):
dev, part = self.splitDeviceName(dev)
description = _("External Storage %s") % dev
try:
description = readFile("/sys" + phys + "/model")
except IOError, s:
print "couldn't read model: ", s
from Tools.HardwareInfo import HardwareInfo
for physdevprefix, pdescription in DEVICEDB.get(HardwareInfo().device_name,{}).items():
if phys.startswith(physdevprefix):
description = pdescription
# not wholedisk and not partition 1
if part and part != 1:
description += _(" (Partition %d)") % part
return description
def addMountedPartition(self, device, desc):
for x in self.partitions:
if x.mountpoint == device:
#already_mounted
return
self.partitions.append(Partition(mountpoint=device, description=desc))
def removeMountedPartition(self, mountpoint):
for x in self.partitions[:]:
if x.mountpoint == mountpoint:
self.partitions.remove(x)
self.on_partition_list_change("remove", x)
def setDVDSpeed(self, device, speed = 0):
ioctl_flag=int(0x5322)
if not device.startswith('/'):
device = "/dev/" + device
try:
from fcntl import ioctl
cd = open(device)
ioctl(cd.fileno(), ioctl_flag, speed)
cd.close()
except Exception, ex:
print "[Harddisk] Failed to set %s speed to %s" % (device, speed), ex
class UnmountTask(Task.LoggingTask):
def __init__(self, job, hdd):
Task.LoggingTask.__init__(self, job, _("Unmount"))
self.hdd = hdd
self.mountpoints = []
def prepare(self):
try:
dev = self.hdd.disk_path.split('/')[-1]
open('/dev/nomount.%s' % dev, "wb").close()
except Exception, e:
print "ERROR: Failed to create /dev/nomount file:", e
self.setTool('umount')
self.args.append('-f')
for dev in self.hdd.enumMountDevices():
self.args.append(dev)
self.postconditions.append(Task.ReturncodePostcondition())
self.mountpoints.append(dev)
if not self.mountpoints:
print "UnmountTask: No mountpoints found?"
self.cmd = 'true'
self.args = [self.cmd]
def afterRun(self):
for path in self.mountpoints:
try:
os.rmdir(path)
except Exception, ex:
print "Failed to remove path '%s':" % path, ex
class MountTask(Task.LoggingTask):
def __init__(self, job, hdd):
Task.LoggingTask.__init__(self, job, _("Mount"))
self.hdd = hdd
def prepare(self):
try:
dev = self.hdd.disk_path.split('/')[-1]
os.unlink('/dev/nomount.%s' % dev)
except Exception, e:
print "ERROR: Failed to remove /dev/nomount file:", e
# try mounting through fstab first
if self.hdd.mount_device is None:
dev = self.hdd.partitionPath("1")
else:
# if previously mounted, use the same spot
dev = self.hdd.mount_device
fstab = open("/etc/fstab")
lines = fstab.readlines()
fstab.close()
for line in lines:
parts = line.strip().split(" ")
fspath = os.path.realpath(parts[0])
if os.path.realpath(fspath) == dev:
self.setCmdline("mount -t auto " + fspath)
self.postconditions.append(Task.ReturncodePostcondition())
return
# device is not in fstab
if self.hdd.type == DEVTYPE_UDEV:
# we can let udev do the job, re-read the partition table
# Sorry for the sleep 2 hack...
self.setCmdline('sleep 2; hdparm -z ' + self.hdd.disk_path)
self.postconditions.append(Task.ReturncodePostcondition())
class MkfsTask(Task.LoggingTask):
def prepare(self):
self.fsck_state = None
def processOutput(self, data):
print "[Mkfs]", data
if 'Writing inode tables:' in data:
self.fsck_state = 'inode'
elif 'Creating journal' in data:
self.fsck_state = 'journal'
self.setProgress(80)
elif 'Writing superblocks ' in data:
self.setProgress(95)
elif self.fsck_state == 'inode':
if '/' in data:
try:
d = data.strip(' \x08\r\n').split('/',1)
if '\x08' in d[1]:
d[1] = d[1].split('\x08',1)[0]
self.setProgress(80*int(d[0])/int(d[1]))
except Exception, e:
print "[Mkfs] E:", e
return # don't log the progress
self.log.append(data)
harddiskmanager = HarddiskManager()
def isSleepStateDevice(device):
ret = os.popen("hdparm -C %s" % device).read()
if 'SG_IO' in ret or 'HDIO_DRIVE_CMD' in ret:
return None
if 'drive state is: standby' in ret or 'drive state is: idle' in ret:
return True
elif 'drive state is: active/idle' in ret:
return False
return None
def internalHDDNotSleeping(external=False):
state = False
if harddiskmanager.HDDCount():
for hdd in harddiskmanager.HDDList():
if hdd[1].internal or external:
if hdd[1].idle_running and hdd[1].max_idle_time and not hdd[1].isSleeping():
state = True
return state
SystemInfo["ext4"] = isFileSystemSupported("ext4")
| gpl-2.0 |
bluesea/zulip | tools/deprecated/finbot/monthdelta.py | 115 | 6015 | """monthdelta
Date calculation with months: MonthDelta class and monthmod() function.
"""
__all__ = ['MonthDelta', 'monthmod']
from datetime import date, timedelta
class MonthDelta:
"""Number of months offset from a date or datetime.
MonthDeltas allow date calculation without regard to the different lengths
of different months. A MonthDelta value added to a date produces another
date that has the same day-of-the-month, regardless of the lengths of the
intervening months. If the resulting date is in too short a month, the
last day in that month will result:
date(2008,1,30) + MonthDelta(1) -> date(2008,2,29)
MonthDeltas may be added, subtracted, multiplied, and floor-divided
similarly to timedeltas. They may not be added to timedeltas directly, as
both classes are intended to be used directly with dates and datetimes.
Only ints may be passed to the constructor. MonthDeltas are immutable.
NOTE: in calculations involving the 29th, 30th, and 31st days of the
month, MonthDeltas are not necessarily invertible [i.e., the result above
would not imply that date(2008,2,29) - MonthDelta(1) -> date(2008,1,30)].
"""
__slots__ = ('__months',)
def __init__(self, months=1):
if not isinstance(months, int):
raise TypeError('months must be an integer')
self.__months = months
def months(self):
return self.__months
months = property(months)
def __repr__(self):
try:
return 'MonthDelta({0})'.format(self.__months)
except AttributeError:
return 'MonthDelta(' + str(self.__months) + ')'
def __str__(self):
return str(self.__months) + ' month' + ((abs(self.__months) != 1
and 's') or '')
def __hash__(self):
return hash(self.__months)
def __eq__(self, other):
if isinstance(other, MonthDelta):
return (self.__months == other.months)
return False
def __ne__(self, other):
if isinstance(other, MonthDelta):
return (self.__months != other.months)
return True
def __lt__(self, other):
if isinstance(other, MonthDelta):
return (self.__months < other.months)
return NotImplemented
def __le__(self, other):
if isinstance(other, MonthDelta):
return (self.__months <= other.months)
return NotImplemented
def __gt__(self, other):
if isinstance(other, MonthDelta):
return (self.__months > other.months)
return NotImplemented
def __ge__(self, other):
if isinstance(other, MonthDelta):
return (self.__months >= other.months)
return NotImplemented
def __add__(self, other):
if isinstance(other, MonthDelta):
return MonthDelta(self.__months + other.months)
if isinstance(other, date):
day = other.day
# subtract one because months are not zero-based
month = other.month + self.__months - 1
year = other.year + month // 12
# now add it back
month = month % 12 + 1
if month == 2:
if day >= 29 and not year%4 and (year%100 or not year%400):
day = 29
elif day > 28:
day = 28
elif month in (4,6,9,11) and day > 30:
day = 30
try:
return other.replace(year, month, day)
except ValueError:
raise OverflowError('date value out of range')
return NotImplemented
def __sub__(self, other):
if isinstance(other, MonthDelta):
return MonthDelta(self.__months - other.months)
return NotImplemented
def __mul__(self, other):
if isinstance(other, int):
return MonthDelta(self.__months * other)
return NotImplemented
def __floordiv__(self, other):
# MonthDelta // MonthDelta -> int
if isinstance(other, MonthDelta):
return self.__months // other.months
if isinstance(other, int):
return MonthDelta(self.__months // other)
return NotImplemented
def __radd__(self, other):
return self + other
def __rsub__(self, other):
return -self + other
def __rmul__(self, other):
return self * other
def __ifloordiv__(self, other):
# in-place division by a MonthDelta (which will change the variable's
# type) is almost certainly a bug -- raising this error is the reason
# we don't just fall back on __floordiv__
if isinstance(other, MonthDelta):
raise TypeError('in-place division of a MonthDelta requires an '
'integer divisor')
if isinstance(other, int):
return MonthDelta(self.__months // other)
return NotImplemented
def __neg__(self):
return MonthDelta(-self.__months)
def __pos__(self):
return MonthDelta(+self.__months)
def __abs__(self):
return MonthDelta(abs(self.__months))
def __bool__(self):
return bool(self.__months)
__nonzero__ = __bool__
def monthmod(start, end):
"""Months between dates, plus leftover time.
Distribute the interim between start and end dates into MonthDelta and
timedelta portions. If and only if start is after end, returned MonthDelta
will be negative. Returned timedelta is always non-negative, and is always
smaller than the month in which the end date occurs.
Invariant: dt + monthmod(dt, dt+td)[0] + monthmod(dt, dt+td)[1] = dt + td
"""
if not (isinstance(start, date) and isinstance(end, date)):
raise TypeError('start and end must be dates')
md = MonthDelta(12*(end.year - start.year) + end.month - start.month -
int(start.day > end.day))
# will overflow (underflow?) for end near date.min
return md, end - (start + md)
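# Illustrative usage sketch (editorial addition, not part of the original
# module): exercises the clamping behaviour documented for MonthDelta and the
# monthmod() invariant stated in its docstring.
if __name__ == '__main__':
    d = date(2008, 1, 30)
    print(d + MonthDelta(1))  # 2008-02-29: day clamped to the end of February
    md, td = monthmod(date(2008, 1, 15), date(2008, 3, 1))
    print(repr(md))           # MonthDelta(1)
    print(repr(td))           # a timedelta of 15 days
    assert date(2008, 1, 15) + md + td == date(2008, 3, 1)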
| apache-2.0 |
Niknakflak/-tg-station | tools/midi2piano/pyperclip/windows.py | 110 | 5405 | """
This module implements clipboard handling on Windows using ctypes.
"""
import time
import contextlib
import ctypes
from ctypes import c_size_t, sizeof, c_wchar_p, get_errno, c_wchar
from .exceptions import PyperclipWindowsException
class CheckedCall(object):
def __init__(self, f):
super(CheckedCall, self).__setattr__("f", f)
def __call__(self, *args):
ret = self.f(*args)
if not ret and get_errno():
raise PyperclipWindowsException("Error calling " + self.f.__name__)
return ret
def __setattr__(self, key, value):
setattr(self.f, key, value)
def init_windows_clipboard():
from ctypes.wintypes import (HGLOBAL, LPVOID, DWORD, LPCSTR, INT, HWND,
HINSTANCE, HMENU, BOOL, UINT, HANDLE)
windll = ctypes.windll
safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
safeCreateWindowExA.argtypes = [DWORD, LPCSTR, LPCSTR, DWORD, INT, INT,
INT, INT, HWND, HMENU, HINSTANCE, LPVOID]
safeCreateWindowExA.restype = HWND
safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
safeDestroyWindow.argtypes = [HWND]
safeDestroyWindow.restype = BOOL
OpenClipboard = windll.user32.OpenClipboard
OpenClipboard.argtypes = [HWND]
OpenClipboard.restype = BOOL
safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
safeCloseClipboard.argtypes = []
safeCloseClipboard.restype = BOOL
safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
safeEmptyClipboard.argtypes = []
safeEmptyClipboard.restype = BOOL
safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
safeGetClipboardData.argtypes = [UINT]
safeGetClipboardData.restype = HANDLE
safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
safeSetClipboardData.argtypes = [UINT, HANDLE]
safeSetClipboardData.restype = HANDLE
safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
safeGlobalAlloc.argtypes = [UINT, c_size_t]
safeGlobalAlloc.restype = HGLOBAL
safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
safeGlobalLock.argtypes = [HGLOBAL]
safeGlobalLock.restype = LPVOID
safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
safeGlobalUnlock.argtypes = [HGLOBAL]
safeGlobalUnlock.restype = BOOL
GMEM_MOVEABLE = 0x0002
CF_UNICODETEXT = 13
@contextlib.contextmanager
def window():
"""
Context that provides a valid Windows hwnd.
"""
# we really just need the hwnd, so setting "STATIC"
# as predefined lpClass is just fine.
hwnd = safeCreateWindowExA(0, b"STATIC", None, 0, 0, 0, 0, 0,
None, None, None, None)
try:
yield hwnd
finally:
safeDestroyWindow(hwnd)
@contextlib.contextmanager
def clipboard(hwnd):
"""
Context manager that opens the clipboard and prevents
other applications from modifying the clipboard content.
"""
# We may not get the clipboard handle immediately because
# some other application is accessing it (?)
# We try for at least 500ms to get the clipboard.
t = time.time() + 0.5
success = False
while time.time() < t:
success = OpenClipboard(hwnd)
if success:
break
time.sleep(0.01)
if not success:
raise PyperclipWindowsException("Error calling OpenClipboard")
try:
yield
finally:
safeCloseClipboard()
def copy_windows(text):
# This function is heavily based on
# http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
with window() as hwnd:
# http://msdn.com/ms649048
# If an application calls OpenClipboard with hwnd set to NULL,
# EmptyClipboard sets the clipboard owner to NULL;
# this causes SetClipboardData to fail.
# => We need a valid hwnd to copy something.
with clipboard(hwnd):
safeEmptyClipboard()
if text:
# http://msdn.com/ms649051
# If the hMem parameter identifies a memory object,
# the object must have been allocated using the
# function with the GMEM_MOVEABLE flag.
count = len(text) + 1
handle = safeGlobalAlloc(GMEM_MOVEABLE,
count * sizeof(c_wchar))
locked_handle = safeGlobalLock(handle)
ctypes.memmove(c_wchar_p(locked_handle), c_wchar_p(text), count * sizeof(c_wchar))
safeGlobalUnlock(handle)
safeSetClipboardData(CF_UNICODETEXT, handle)
def paste_windows():
with clipboard(None):
handle = safeGetClipboardData(CF_UNICODETEXT)
if not handle:
# GetClipboardData may return NULL with errno == NO_ERROR
# if the clipboard is empty.
# (Also, it may return a handle to an empty buffer,
# but technically that's not empty)
return ""
return c_wchar_p(handle).value
return copy_windows, paste_windows
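# Usage sketch (editorial comment, not part of the original module; the module
# cannot be executed standalone because of the relative import above).
# pyperclip calls init_windows_clipboard() once on Windows and then uses the
# two returned closures, roughly like this:
#
#   copy, paste = init_windows_clipboard()
#   copy(u"hello clipboard")
#   assert paste() == u"hello clipboard"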
| agpl-3.0 |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/cryptography/hazmat/backends/openssl/encode_asn1.py | 3 | 23428 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import calendar
import ipaddress
import six
from cryptography import utils, x509
from cryptography.hazmat.backends.openssl.decode_asn1 import (
_CRL_ENTRY_REASON_ENUM_TO_CODE, _DISTPOINT_TYPE_FULLNAME,
_DISTPOINT_TYPE_RELATIVENAME
)
from cryptography.x509.name import _ASN1Type
from cryptography.x509.oid import (
CRLEntryExtensionOID, ExtensionOID, OCSPExtensionOID,
)
def _encode_asn1_int(backend, x):
"""
Converts a python integer to an ASN1_INTEGER. The returned ASN1_INTEGER
will not be garbage collected (to support adding them to structs that take
ownership of the object). Be sure to register it for GC if it will be
discarded after use.
"""
# Convert Python integer to OpenSSL "bignum" in case value exceeds
# machine's native integer limits (note: `int_to_bn` doesn't automatically
# GC).
i = backend._int_to_bn(x)
i = backend._ffi.gc(i, backend._lib.BN_free)
# Wrap in an ASN.1 integer. Don't GC -- as documented.
i = backend._lib.BN_to_ASN1_INTEGER(i, backend._ffi.NULL)
backend.openssl_assert(i != backend._ffi.NULL)
return i
def _encode_asn1_int_gc(backend, x):
i = _encode_asn1_int(backend, x)
i = backend._ffi.gc(i, backend._lib.ASN1_INTEGER_free)
return i
def _encode_asn1_str(backend, data):
"""
Create an ASN1_OCTET_STRING from a Python byte string.
"""
s = backend._lib.ASN1_OCTET_STRING_new()
res = backend._lib.ASN1_OCTET_STRING_set(s, data, len(data))
backend.openssl_assert(res == 1)
return s
def _encode_asn1_utf8_str(backend, string):
"""
Create an ASN1_UTF8STRING from a Python unicode string.
This object will be an ASN1_STRING with UTF8 type in OpenSSL and
can be decoded with ASN1_STRING_to_UTF8.
"""
s = backend._lib.ASN1_UTF8STRING_new()
res = backend._lib.ASN1_STRING_set(
s, string.encode("utf8"), len(string.encode("utf8"))
)
backend.openssl_assert(res == 1)
return s
def _encode_asn1_str_gc(backend, data):
s = _encode_asn1_str(backend, data)
s = backend._ffi.gc(s, backend._lib.ASN1_OCTET_STRING_free)
return s
def _encode_inhibit_any_policy(backend, inhibit_any_policy):
return _encode_asn1_int_gc(backend, inhibit_any_policy.skip_certs)
def _encode_name(backend, name):
"""
The X509_NAME created will not be gc'd. Use _encode_name_gc if needed.
"""
subject = backend._lib.X509_NAME_new()
for rdn in name.rdns:
set_flag = 0 # indicate whether to add to last RDN or create new RDN
for attribute in rdn:
name_entry = _encode_name_entry(backend, attribute)
# X509_NAME_add_entry dups the object so we need to gc this copy
name_entry = backend._ffi.gc(
name_entry, backend._lib.X509_NAME_ENTRY_free
)
res = backend._lib.X509_NAME_add_entry(
subject, name_entry, -1, set_flag)
backend.openssl_assert(res == 1)
set_flag = -1
return subject
def _encode_name_gc(backend, attributes):
subject = _encode_name(backend, attributes)
subject = backend._ffi.gc(subject, backend._lib.X509_NAME_free)
return subject
def _encode_sk_name_entry(backend, attributes):
"""
The sk_X509_NAME_ENTRY created will not be gc'd.
"""
stack = backend._lib.sk_X509_NAME_ENTRY_new_null()
for attribute in attributes:
name_entry = _encode_name_entry(backend, attribute)
res = backend._lib.sk_X509_NAME_ENTRY_push(stack, name_entry)
backend.openssl_assert(res >= 1)
return stack
def _encode_name_entry(backend, attribute):
if attribute._type is _ASN1Type.BMPString:
value = attribute.value.encode('utf_16_be')
else:
value = attribute.value.encode('utf8')
obj = _txt2obj_gc(backend, attribute.oid.dotted_string)
name_entry = backend._lib.X509_NAME_ENTRY_create_by_OBJ(
backend._ffi.NULL, obj, attribute._type.value, value, len(value)
)
return name_entry
def _encode_crl_number_delta_crl_indicator(backend, ext):
return _encode_asn1_int_gc(backend, ext.crl_number)
def _encode_issuing_dist_point(backend, ext):
idp = backend._lib.ISSUING_DIST_POINT_new()
backend.openssl_assert(idp != backend._ffi.NULL)
idp = backend._ffi.gc(idp, backend._lib.ISSUING_DIST_POINT_free)
idp.onlyuser = 255 if ext.only_contains_user_certs else 0
idp.onlyCA = 255 if ext.only_contains_ca_certs else 0
idp.indirectCRL = 255 if ext.indirect_crl else 0
idp.onlyattr = 255 if ext.only_contains_attribute_certs else 0
if ext.only_some_reasons:
idp.onlysomereasons = _encode_reasonflags(
backend, ext.only_some_reasons
)
if ext.full_name:
idp.distpoint = _encode_full_name(backend, ext.full_name)
if ext.relative_name:
idp.distpoint = _encode_relative_name(backend, ext.relative_name)
return idp
def _encode_crl_reason(backend, crl_reason):
asn1enum = backend._lib.ASN1_ENUMERATED_new()
backend.openssl_assert(asn1enum != backend._ffi.NULL)
asn1enum = backend._ffi.gc(asn1enum, backend._lib.ASN1_ENUMERATED_free)
res = backend._lib.ASN1_ENUMERATED_set(
asn1enum, _CRL_ENTRY_REASON_ENUM_TO_CODE[crl_reason.reason]
)
backend.openssl_assert(res == 1)
return asn1enum
def _encode_invalidity_date(backend, invalidity_date):
time = backend._lib.ASN1_GENERALIZEDTIME_set(
backend._ffi.NULL, calendar.timegm(
invalidity_date.invalidity_date.timetuple()
)
)
backend.openssl_assert(time != backend._ffi.NULL)
time = backend._ffi.gc(time, backend._lib.ASN1_GENERALIZEDTIME_free)
return time
def _encode_certificate_policies(backend, certificate_policies):
cp = backend._lib.sk_POLICYINFO_new_null()
backend.openssl_assert(cp != backend._ffi.NULL)
cp = backend._ffi.gc(cp, backend._lib.sk_POLICYINFO_free)
for policy_info in certificate_policies:
pi = backend._lib.POLICYINFO_new()
backend.openssl_assert(pi != backend._ffi.NULL)
res = backend._lib.sk_POLICYINFO_push(cp, pi)
backend.openssl_assert(res >= 1)
oid = _txt2obj(backend, policy_info.policy_identifier.dotted_string)
pi.policyid = oid
if policy_info.policy_qualifiers:
pqis = backend._lib.sk_POLICYQUALINFO_new_null()
backend.openssl_assert(pqis != backend._ffi.NULL)
for qualifier in policy_info.policy_qualifiers:
pqi = backend._lib.POLICYQUALINFO_new()
backend.openssl_assert(pqi != backend._ffi.NULL)
res = backend._lib.sk_POLICYQUALINFO_push(pqis, pqi)
backend.openssl_assert(res >= 1)
if isinstance(qualifier, six.text_type):
pqi.pqualid = _txt2obj(
backend, x509.OID_CPS_QUALIFIER.dotted_string
)
pqi.d.cpsuri = _encode_asn1_str(
backend,
qualifier.encode("ascii"),
)
else:
assert isinstance(qualifier, x509.UserNotice)
pqi.pqualid = _txt2obj(
backend, x509.OID_CPS_USER_NOTICE.dotted_string
)
un = backend._lib.USERNOTICE_new()
backend.openssl_assert(un != backend._ffi.NULL)
pqi.d.usernotice = un
if qualifier.explicit_text:
un.exptext = _encode_asn1_utf8_str(
backend, qualifier.explicit_text
)
un.noticeref = _encode_notice_reference(
backend, qualifier.notice_reference
)
pi.qualifiers = pqis
return cp
def _encode_notice_reference(backend, notice):
if notice is None:
return backend._ffi.NULL
else:
nr = backend._lib.NOTICEREF_new()
backend.openssl_assert(nr != backend._ffi.NULL)
# organization is a required field
nr.organization = _encode_asn1_utf8_str(backend, notice.organization)
notice_stack = backend._lib.sk_ASN1_INTEGER_new_null()
nr.noticenos = notice_stack
for number in notice.notice_numbers:
num = _encode_asn1_int(backend, number)
res = backend._lib.sk_ASN1_INTEGER_push(notice_stack, num)
backend.openssl_assert(res >= 1)
return nr
def _txt2obj(backend, name):
"""
Converts a Python string with an ASN.1 object ID in dotted form to a
ASN1_OBJECT.
"""
name = name.encode('ascii')
obj = backend._lib.OBJ_txt2obj(name, 1)
backend.openssl_assert(obj != backend._ffi.NULL)
return obj
def _txt2obj_gc(backend, name):
obj = _txt2obj(backend, name)
obj = backend._ffi.gc(obj, backend._lib.ASN1_OBJECT_free)
return obj
def _encode_ocsp_nocheck(backend, ext):
# Doesn't need to be GC'd
return backend._lib.ASN1_NULL_new()
def _encode_key_usage(backend, key_usage):
set_bit = backend._lib.ASN1_BIT_STRING_set_bit
ku = backend._lib.ASN1_BIT_STRING_new()
ku = backend._ffi.gc(ku, backend._lib.ASN1_BIT_STRING_free)
res = set_bit(ku, 0, key_usage.digital_signature)
backend.openssl_assert(res == 1)
res = set_bit(ku, 1, key_usage.content_commitment)
backend.openssl_assert(res == 1)
res = set_bit(ku, 2, key_usage.key_encipherment)
backend.openssl_assert(res == 1)
res = set_bit(ku, 3, key_usage.data_encipherment)
backend.openssl_assert(res == 1)
res = set_bit(ku, 4, key_usage.key_agreement)
backend.openssl_assert(res == 1)
res = set_bit(ku, 5, key_usage.key_cert_sign)
backend.openssl_assert(res == 1)
res = set_bit(ku, 6, key_usage.crl_sign)
backend.openssl_assert(res == 1)
if key_usage.key_agreement:
res = set_bit(ku, 7, key_usage.encipher_only)
backend.openssl_assert(res == 1)
res = set_bit(ku, 8, key_usage.decipher_only)
backend.openssl_assert(res == 1)
else:
res = set_bit(ku, 7, 0)
backend.openssl_assert(res == 1)
res = set_bit(ku, 8, 0)
backend.openssl_assert(res == 1)
return ku
def _encode_authority_key_identifier(backend, authority_keyid):
akid = backend._lib.AUTHORITY_KEYID_new()
backend.openssl_assert(akid != backend._ffi.NULL)
akid = backend._ffi.gc(akid, backend._lib.AUTHORITY_KEYID_free)
if authority_keyid.key_identifier is not None:
akid.keyid = _encode_asn1_str(
backend,
authority_keyid.key_identifier,
)
if authority_keyid.authority_cert_issuer is not None:
akid.issuer = _encode_general_names(
backend, authority_keyid.authority_cert_issuer
)
if authority_keyid.authority_cert_serial_number is not None:
akid.serial = _encode_asn1_int(
backend, authority_keyid.authority_cert_serial_number
)
return akid
def _encode_basic_constraints(backend, basic_constraints):
constraints = backend._lib.BASIC_CONSTRAINTS_new()
constraints = backend._ffi.gc(
constraints, backend._lib.BASIC_CONSTRAINTS_free
)
constraints.ca = 255 if basic_constraints.ca else 0
if basic_constraints.ca and basic_constraints.path_length is not None:
constraints.pathlen = _encode_asn1_int(
backend, basic_constraints.path_length
)
return constraints
def _encode_authority_information_access(backend, authority_info_access):
aia = backend._lib.sk_ACCESS_DESCRIPTION_new_null()
backend.openssl_assert(aia != backend._ffi.NULL)
aia = backend._ffi.gc(
aia,
lambda x: backend._lib.sk_ACCESS_DESCRIPTION_pop_free(
x, backend._ffi.addressof(
backend._lib._original_lib, "ACCESS_DESCRIPTION_free"
)
)
)
for access_description in authority_info_access:
ad = backend._lib.ACCESS_DESCRIPTION_new()
method = _txt2obj(
backend, access_description.access_method.dotted_string
)
_encode_general_name_preallocated(
backend, access_description.access_location, ad.location
)
ad.method = method
res = backend._lib.sk_ACCESS_DESCRIPTION_push(aia, ad)
backend.openssl_assert(res >= 1)
return aia
def _encode_general_names(backend, names):
general_names = backend._lib.GENERAL_NAMES_new()
backend.openssl_assert(general_names != backend._ffi.NULL)
for name in names:
gn = _encode_general_name(backend, name)
res = backend._lib.sk_GENERAL_NAME_push(general_names, gn)
backend.openssl_assert(res != 0)
return general_names
def _encode_alt_name(backend, san):
general_names = _encode_general_names(backend, san)
general_names = backend._ffi.gc(
general_names, backend._lib.GENERAL_NAMES_free
)
return general_names
def _encode_subject_key_identifier(backend, ski):
return _encode_asn1_str_gc(backend, ski.digest)
def _encode_general_name(backend, name):
gn = backend._lib.GENERAL_NAME_new()
_encode_general_name_preallocated(backend, name, gn)
return gn
def _encode_general_name_preallocated(backend, name, gn):
if isinstance(name, x509.DNSName):
backend.openssl_assert(gn != backend._ffi.NULL)
gn.type = backend._lib.GEN_DNS
ia5 = backend._lib.ASN1_IA5STRING_new()
backend.openssl_assert(ia5 != backend._ffi.NULL)
# ia5strings are supposed to be ITU T.50 but to allow round-tripping
# of broken certs that encode utf8 we'll encode utf8 here too.
value = name.value.encode("utf8")
res = backend._lib.ASN1_STRING_set(ia5, value, len(value))
backend.openssl_assert(res == 1)
gn.d.dNSName = ia5
elif isinstance(name, x509.RegisteredID):
backend.openssl_assert(gn != backend._ffi.NULL)
gn.type = backend._lib.GEN_RID
obj = backend._lib.OBJ_txt2obj(
name.value.dotted_string.encode('ascii'), 1
)
backend.openssl_assert(obj != backend._ffi.NULL)
gn.d.registeredID = obj
elif isinstance(name, x509.DirectoryName):
backend.openssl_assert(gn != backend._ffi.NULL)
dir_name = _encode_name(backend, name.value)
gn.type = backend._lib.GEN_DIRNAME
gn.d.directoryName = dir_name
elif isinstance(name, x509.IPAddress):
backend.openssl_assert(gn != backend._ffi.NULL)
if isinstance(name.value, ipaddress.IPv4Network):
packed = (
name.value.network_address.packed +
utils.int_to_bytes(((1 << 32) - name.value.num_addresses), 4)
)
elif isinstance(name.value, ipaddress.IPv6Network):
packed = (
name.value.network_address.packed +
utils.int_to_bytes((1 << 128) - name.value.num_addresses, 16)
)
else:
packed = name.value.packed
ipaddr = _encode_asn1_str(backend, packed)
gn.type = backend._lib.GEN_IPADD
gn.d.iPAddress = ipaddr
elif isinstance(name, x509.OtherName):
backend.openssl_assert(gn != backend._ffi.NULL)
other_name = backend._lib.OTHERNAME_new()
backend.openssl_assert(other_name != backend._ffi.NULL)
type_id = backend._lib.OBJ_txt2obj(
name.type_id.dotted_string.encode('ascii'), 1
)
backend.openssl_assert(type_id != backend._ffi.NULL)
data = backend._ffi.new("unsigned char[]", name.value)
data_ptr_ptr = backend._ffi.new("unsigned char **")
data_ptr_ptr[0] = data
value = backend._lib.d2i_ASN1_TYPE(
backend._ffi.NULL, data_ptr_ptr, len(name.value)
)
if value == backend._ffi.NULL:
backend._consume_errors()
raise ValueError("Invalid ASN.1 data")
other_name.type_id = type_id
other_name.value = value
gn.type = backend._lib.GEN_OTHERNAME
gn.d.otherName = other_name
elif isinstance(name, x509.RFC822Name):
backend.openssl_assert(gn != backend._ffi.NULL)
# ia5strings are supposed to be ITU T.50 but to allow round-tripping
# of broken certs that encode utf8 we'll encode utf8 here too.
data = name.value.encode("utf8")
asn1_str = _encode_asn1_str(backend, data)
gn.type = backend._lib.GEN_EMAIL
gn.d.rfc822Name = asn1_str
elif isinstance(name, x509.UniformResourceIdentifier):
backend.openssl_assert(gn != backend._ffi.NULL)
# ia5strings are supposed to be ITU T.50 but to allow round-tripping
# of broken certs that encode utf8 we'll encode utf8 here too.
data = name.value.encode("utf8")
asn1_str = _encode_asn1_str(backend, data)
gn.type = backend._lib.GEN_URI
gn.d.uniformResourceIdentifier = asn1_str
else:
raise ValueError(
"{} is an unknown GeneralName type".format(name)
)
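# For illustration, x509.DNSName(u"example.com") takes the GEN_DNS branch above
# and stores the UTF-8 bytes in an ASN1_IA5STRING, while
# x509.IPAddress(ipaddress.ip_network(u"192.0.2.0/24")) takes the GEN_IPADD
# branch and packs the network address followed by the 4-byte netmask.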
def _encode_extended_key_usage(backend, extended_key_usage):
eku = backend._lib.sk_ASN1_OBJECT_new_null()
eku = backend._ffi.gc(eku, backend._lib.sk_ASN1_OBJECT_free)
for oid in extended_key_usage:
obj = _txt2obj(backend, oid.dotted_string)
res = backend._lib.sk_ASN1_OBJECT_push(eku, obj)
backend.openssl_assert(res >= 1)
return eku
_CRLREASONFLAGS = {
x509.ReasonFlags.key_compromise: 1,
x509.ReasonFlags.ca_compromise: 2,
x509.ReasonFlags.affiliation_changed: 3,
x509.ReasonFlags.superseded: 4,
x509.ReasonFlags.cessation_of_operation: 5,
x509.ReasonFlags.certificate_hold: 6,
x509.ReasonFlags.privilege_withdrawn: 7,
x509.ReasonFlags.aa_compromise: 8,
}
def _encode_reasonflags(backend, reasons):
bitmask = backend._lib.ASN1_BIT_STRING_new()
backend.openssl_assert(bitmask != backend._ffi.NULL)
for reason in reasons:
res = backend._lib.ASN1_BIT_STRING_set_bit(
bitmask, _CRLREASONFLAGS[reason], 1
)
backend.openssl_assert(res == 1)
return bitmask
def _encode_full_name(backend, full_name):
dpn = backend._lib.DIST_POINT_NAME_new()
backend.openssl_assert(dpn != backend._ffi.NULL)
dpn.type = _DISTPOINT_TYPE_FULLNAME
dpn.name.fullname = _encode_general_names(backend, full_name)
return dpn
def _encode_relative_name(backend, relative_name):
dpn = backend._lib.DIST_POINT_NAME_new()
backend.openssl_assert(dpn != backend._ffi.NULL)
dpn.type = _DISTPOINT_TYPE_RELATIVENAME
dpn.name.relativename = _encode_sk_name_entry(backend, relative_name)
return dpn
def _encode_cdps_freshest_crl(backend, cdps):
cdp = backend._lib.sk_DIST_POINT_new_null()
cdp = backend._ffi.gc(cdp, backend._lib.sk_DIST_POINT_free)
for point in cdps:
dp = backend._lib.DIST_POINT_new()
backend.openssl_assert(dp != backend._ffi.NULL)
if point.reasons:
dp.reasons = _encode_reasonflags(backend, point.reasons)
if point.full_name:
dp.distpoint = _encode_full_name(backend, point.full_name)
if point.relative_name:
dp.distpoint = _encode_relative_name(backend, point.relative_name)
if point.crl_issuer:
dp.CRLissuer = _encode_general_names(backend, point.crl_issuer)
res = backend._lib.sk_DIST_POINT_push(cdp, dp)
backend.openssl_assert(res >= 1)
return cdp
def _encode_name_constraints(backend, name_constraints):
nc = backend._lib.NAME_CONSTRAINTS_new()
backend.openssl_assert(nc != backend._ffi.NULL)
nc = backend._ffi.gc(nc, backend._lib.NAME_CONSTRAINTS_free)
permitted = _encode_general_subtree(
backend, name_constraints.permitted_subtrees
)
nc.permittedSubtrees = permitted
excluded = _encode_general_subtree(
backend, name_constraints.excluded_subtrees
)
nc.excludedSubtrees = excluded
return nc
def _encode_policy_constraints(backend, policy_constraints):
pc = backend._lib.POLICY_CONSTRAINTS_new()
backend.openssl_assert(pc != backend._ffi.NULL)
pc = backend._ffi.gc(pc, backend._lib.POLICY_CONSTRAINTS_free)
if policy_constraints.require_explicit_policy is not None:
pc.requireExplicitPolicy = _encode_asn1_int(
backend, policy_constraints.require_explicit_policy
)
if policy_constraints.inhibit_policy_mapping is not None:
pc.inhibitPolicyMapping = _encode_asn1_int(
backend, policy_constraints.inhibit_policy_mapping
)
return pc
def _encode_general_subtree(backend, subtrees):
if subtrees is None:
return backend._ffi.NULL
else:
general_subtrees = backend._lib.sk_GENERAL_SUBTREE_new_null()
for name in subtrees:
gs = backend._lib.GENERAL_SUBTREE_new()
gs.base = _encode_general_name(backend, name)
res = backend._lib.sk_GENERAL_SUBTREE_push(general_subtrees, gs)
backend.openssl_assert(res >= 1)
return general_subtrees
def _encode_nonce(backend, nonce):
return _encode_asn1_str_gc(backend, nonce.nonce)
_EXTENSION_ENCODE_HANDLERS = {
ExtensionOID.BASIC_CONSTRAINTS: _encode_basic_constraints,
ExtensionOID.SUBJECT_KEY_IDENTIFIER: _encode_subject_key_identifier,
ExtensionOID.KEY_USAGE: _encode_key_usage,
ExtensionOID.SUBJECT_ALTERNATIVE_NAME: _encode_alt_name,
ExtensionOID.ISSUER_ALTERNATIVE_NAME: _encode_alt_name,
ExtensionOID.EXTENDED_KEY_USAGE: _encode_extended_key_usage,
ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _encode_authority_key_identifier,
ExtensionOID.CERTIFICATE_POLICIES: _encode_certificate_policies,
ExtensionOID.AUTHORITY_INFORMATION_ACCESS: (
_encode_authority_information_access
),
ExtensionOID.CRL_DISTRIBUTION_POINTS: _encode_cdps_freshest_crl,
ExtensionOID.FRESHEST_CRL: _encode_cdps_freshest_crl,
ExtensionOID.INHIBIT_ANY_POLICY: _encode_inhibit_any_policy,
ExtensionOID.OCSP_NO_CHECK: _encode_ocsp_nocheck,
ExtensionOID.NAME_CONSTRAINTS: _encode_name_constraints,
ExtensionOID.POLICY_CONSTRAINTS: _encode_policy_constraints,
}
_CRL_EXTENSION_ENCODE_HANDLERS = {
ExtensionOID.ISSUER_ALTERNATIVE_NAME: _encode_alt_name,
ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _encode_authority_key_identifier,
ExtensionOID.AUTHORITY_INFORMATION_ACCESS: (
_encode_authority_information_access
),
ExtensionOID.CRL_NUMBER: _encode_crl_number_delta_crl_indicator,
ExtensionOID.DELTA_CRL_INDICATOR: _encode_crl_number_delta_crl_indicator,
ExtensionOID.ISSUING_DISTRIBUTION_POINT: _encode_issuing_dist_point,
}
_CRL_ENTRY_EXTENSION_ENCODE_HANDLERS = {
CRLEntryExtensionOID.CERTIFICATE_ISSUER: _encode_alt_name,
CRLEntryExtensionOID.CRL_REASON: _encode_crl_reason,
CRLEntryExtensionOID.INVALIDITY_DATE: _encode_invalidity_date,
}
_OCSP_REQUEST_EXTENSION_ENCODE_HANDLERS = {
OCSPExtensionOID.NONCE: _encode_nonce,
}
_OCSP_BASICRESP_EXTENSION_ENCODE_HANDLERS = {
OCSPExtensionOID.NONCE: _encode_nonce,
}
| apache-2.0 |
mafiya69/sympy | sympy/physics/quantum/matrixutils.py | 87 | 10287 | """Utilities to deal with sympy.Matrix, numpy and scipy.sparse."""
from __future__ import print_function, division
from sympy import Matrix, I, Expr, Integer
from sympy.core.compatibility import range
from sympy.matrices import eye, zeros
from sympy.external import import_module
__all__ = [
'numpy_ndarray',
'scipy_sparse_matrix',
'sympy_to_numpy',
'sympy_to_scipy_sparse',
'numpy_to_sympy',
'scipy_sparse_to_sympy',
'flatten_scalar',
'matrix_dagger',
'to_sympy',
'to_numpy',
'to_scipy_sparse',
'matrix_tensor_product',
'matrix_zeros'
]
# Conditionally define the base classes for numpy and scipy.sparse arrays
# for use in isinstance tests.
np = import_module('numpy')
if not np:
class numpy_ndarray(object):
pass
else:
numpy_ndarray = np.ndarray
scipy = import_module('scipy', __import__kwargs={'fromlist': ['sparse']})
if not scipy:
class scipy_sparse_matrix(object):
pass
sparse = None
else:
sparse = scipy.sparse
# Try to find spmatrix.
if hasattr(sparse, 'base'):
# Newer versions have it under scipy.sparse.base.
scipy_sparse_matrix = sparse.base.spmatrix
elif hasattr(sparse, 'sparse'):
# Older versions have it under scipy.sparse.sparse.
scipy_sparse_matrix = sparse.sparse.spmatrix
def sympy_to_numpy(m, **options):
"""Convert a sympy Matrix/complex number to a numpy matrix or scalar."""
if not np:
raise ImportError
dtype = options.get('dtype', 'complex')
if isinstance(m, Matrix):
return np.matrix(m.tolist(), dtype=dtype)
elif isinstance(m, Expr):
if m.is_Number or m.is_NumberSymbol or m == I:
return complex(m)
raise TypeError('Expected Matrix or complex scalar, got: %r' % m)
def sympy_to_scipy_sparse(m, **options):
"""Convert a sympy Matrix/complex number to a numpy matrix or scalar."""
if not np or not sparse:
raise ImportError
dtype = options.get('dtype', 'complex')
if isinstance(m, Matrix):
return sparse.csr_matrix(np.matrix(m.tolist(), dtype=dtype))
elif isinstance(m, Expr):
if m.is_Number or m.is_NumberSymbol or m == I:
return complex(m)
raise TypeError('Expected Matrix or complex scalar, got: %r' % m)
def scipy_sparse_to_sympy(m, **options):
"""Convert a scipy.sparse matrix to a sympy matrix."""
return Matrix(m.todense())
def numpy_to_sympy(m, **options):
"""Convert a numpy matrix to a sympy matrix."""
return Matrix(m)
def to_sympy(m, **options):
"""Convert a numpy/scipy.sparse matrix to a sympy matrix."""
if isinstance(m, Matrix):
return m
elif isinstance(m, numpy_ndarray):
return numpy_to_sympy(m)
elif isinstance(m, scipy_sparse_matrix):
return scipy_sparse_to_sympy(m)
elif isinstance(m, Expr):
return m
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def to_numpy(m, **options):
"""Convert a sympy/scipy.sparse matrix to a numpy matrix."""
dtype = options.get('dtype', 'complex')
if isinstance(m, (Matrix, Expr)):
return sympy_to_numpy(m, dtype=dtype)
elif isinstance(m, numpy_ndarray):
return m
elif isinstance(m, scipy_sparse_matrix):
return m.todense()
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def to_scipy_sparse(m, **options):
"""Convert a sympy/numpy matrix to a scipy.sparse matrix."""
dtype = options.get('dtype', 'complex')
if isinstance(m, (Matrix, Expr)):
return sympy_to_scipy_sparse(m, dtype=dtype)
elif isinstance(m, numpy_ndarray):
if not sparse:
raise ImportError
return sparse.csr_matrix(m)
elif isinstance(m, scipy_sparse_matrix):
return m
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
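# Example round-trips for the converters above (an illustrative sketch; the
# numpy/scipy branches require those libraries to be importable):
# >>> from sympy import Matrix
# >>> m = Matrix([[1, 0], [0, 1]])
# >>> nm = to_numpy(m) # numpy matrix with dtype='complex'
# >>> sm = to_scipy_sparse(m) # scipy.sparse CSR matrix
# >>> to_sympy(nm).shape
# (2, 2)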
def flatten_scalar(e):
"""Flatten a 1x1 matrix to a scalar, return larger matrices unchanged."""
if isinstance(e, Matrix):
if e.shape == (1, 1):
e = e[0]
if isinstance(e, (numpy_ndarray, scipy_sparse_matrix)):
if e.shape == (1, 1):
e = complex(e[0, 0])
return e
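# For example (sketch): flatten_scalar(Matrix([[5]])) gives the scalar 5, while
# anything larger than 1x1 is returned unchanged.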
def matrix_dagger(e):
"""Return the dagger of a sympy/numpy/scipy.sparse matrix."""
if isinstance(e, Matrix):
return e.H
elif isinstance(e, (numpy_ndarray, scipy_sparse_matrix)):
return e.conjugate().transpose()
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % e)
# TODO: Move this into sympy.matrices.
def _sympy_tensor_product(*matrices):
"""Compute the tensor product of a sequence of sympy Matrices.
This is the standard Kronecker product of matrices [1].
Parameters
==========
matrices : tuple of Matrix instances
The matrices to take the tensor product of.
Returns
=======
matrix : Matrix
The tensor product matrix.
Examples
========
>>> from sympy import I, Matrix, symbols
>>> from sympy.physics.quantum.matrixutils import _sympy_tensor_product
>>> m1 = Matrix([[1,2],[3,4]])
>>> m2 = Matrix([[1,0],[0,1]])
>>> _sympy_tensor_product(m1, m2)
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2],
[3, 0, 4, 0],
[0, 3, 0, 4]])
>>> _sympy_tensor_product(m2, m1)
Matrix([
[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 1, 2],
[0, 0, 3, 4]])
References
==========
[1] http://en.wikipedia.org/wiki/Kronecker_product
"""
# Make sure we have a sequence of Matrices
if not all(isinstance(m, Matrix) for m in matrices):
raise TypeError(
'Sequence of Matrices expected, got: %s' % repr(matrices)
)
# Pull out the first element in the product.
matrix_expansion = matrices[-1]
# Do the tensor product working from right to left.
for mat in reversed(matrices[:-1]):
rows = mat.rows
cols = mat.cols
# Go through each row, appending the tensor product to the running matrix_expansion.
for i in range(rows):
start = matrix_expansion*mat[i*cols]
# Go through each column joining each item
for j in range(cols - 1):
start = start.row_join(
matrix_expansion*mat[i*cols + j + 1]
)
# If this is the first element, make it the start of the
# new row.
if i == 0:
next = start
else:
next = next.col_join(start)
matrix_expansion = next
return matrix_expansion
def _numpy_tensor_product(*product):
"""numpy version of tensor product of multiple arguments."""
if not np:
raise ImportError
answer = product[0]
for item in product[1:]:
answer = np.kron(answer, item)
return answer
def _scipy_sparse_tensor_product(*product):
"""scipy.sparse version of tensor product of multiple arguments."""
if not sparse:
raise ImportError
answer = product[0]
for item in product[1:]:
answer = sparse.kron(answer, item)
# The final matrices will just be multiplied, so csr is a good final
# sparse format.
return sparse.csr_matrix(answer)
def matrix_tensor_product(*product):
"""Compute the matrix tensor product of sympy/numpy/scipy.sparse matrices."""
if isinstance(product[0], Matrix):
return _sympy_tensor_product(*product)
elif isinstance(product[0], numpy_ndarray):
return _numpy_tensor_product(*product)
elif isinstance(product[0], scipy_sparse_matrix):
return _scipy_sparse_tensor_product(*product)
def _numpy_eye(n):
"""numpy version of complex eye."""
if not np:
raise ImportError
return np.matrix(np.eye(n, dtype='complex'))
def _scipy_sparse_eye(n):
"""scipy.sparse version of complex eye."""
if not sparse:
raise ImportError
return sparse.eye(n, n, dtype='complex')
def matrix_eye(n, **options):
"""Get the version of eye and tensor_product for a given format."""
format = options.get('format', 'sympy')
if format == 'sympy':
return eye(n)
elif format == 'numpy':
return _numpy_eye(n)
elif format == 'scipy.sparse':
return _scipy_sparse_eye(n)
raise NotImplementedError('Invalid format: %r' % format)
def _numpy_zeros(m, n, **options):
"""numpy verson of zeros."""
dtype = options.get('dtype', 'float64')
if not np:
raise ImportError
return np.zeros((m, n), dtype=dtype)
def _scipy_sparse_zeros(m, n, **options):
"""scipy.sparse verson of zeros."""
spmatrix = options.get('spmatrix', 'csr')
dtype = options.get('dtype', 'float64')
if not sparse:
raise ImportError
if spmatrix == 'lil':
return sparse.lil_matrix((m, n), dtype=dtype)
elif spmatrix == 'csr':
return sparse.csr_matrix((m, n), dtype=dtype)
def matrix_zeros(m, n, **options):
""""Get a zeros matrix for a given format."""
format = options.get('format', 'sympy')
dtype = options.get('dtype', 'float64')
spmatrix = options.get('spmatrix', 'csr')
if format == 'sympy':
return zeros(m, n)
elif format == 'numpy':
return _numpy_zeros(m, n, **options)
elif format == 'scipy.sparse':
return _scipy_sparse_zeros(m, n, **options)
raise NotImplementedError('Invalid format: %r' % format)
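# Illustrative calls for the format dispatchers above (sketch; the non-sympy
# formats require numpy/scipy):
# >>> matrix_zeros(2, 3) # sympy zeros(2, 3)
# >>> matrix_zeros(2, 3, format='numpy', dtype='complex') # numpy array
# >>> matrix_zeros(2, 3, format='scipy.sparse', spmatrix='lil')
# >>> matrix_eye(4, format='scipy.sparse') # sparse complex identity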
def _numpy_matrix_to_zero(e):
"""Convert a numpy zero matrix to the zero scalar."""
if not np:
raise ImportError
test = np.zeros_like(e)
if np.allclose(e, test):
return 0.0
else:
return e
def _scipy_sparse_matrix_to_zero(e):
"""Convert a scipy.sparse zero matrix to the zero scalar."""
if not np:
raise ImportError
edense = e.todense()
test = np.zeros_like(edense)
if np.allclose(edense, test):
return 0.0
else:
return e
def matrix_to_zero(e):
"""Convert a zero matrix to the scalar zero."""
if isinstance(e, Matrix):
if zeros(*e.shape) == e:
e = Integer(0)
elif isinstance(e, numpy_ndarray):
e = _numpy_matrix_to_zero(e)
elif isinstance(e, scipy_sparse_matrix):
e = _scipy_sparse_matrix_to_zero(e)
return e
| bsd-3-clause |
moondrop-entertainment/django-nonrel-drawp | tests/regressiontests/test_utils/tests.py | 49 | 4833 | import sys
from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature
from models import Person
if sys.version_info >= (2, 5):
from tests_25 import AssertNumQueriesContextManagerTests
class SkippingTestCase(TestCase):
def test_skip_unless_db_feature(self):
"A test that might be skipped is actually called."
# Total hack, but it works, just want an attribute that's always true.
@skipUnlessDBFeature("__class__")
def test_func():
raise ValueError
self.assertRaises(ValueError, test_func)
class AssertNumQueriesTests(TestCase):
def test_assert_num_queries(self):
def test_func():
raise ValueError
self.assertRaises(ValueError,
self.assertNumQueries, 2, test_func
)
def test_assert_num_queries_with_client(self):
person = Person.objects.create(name='test')
self.assertNumQueries(
1,
self.client.get,
"/test_utils/get_person/%s/" % person.pk
)
self.assertNumQueries(
1,
self.client.get,
"/test_utils/get_person/%s/" % person.pk
)
def test_func():
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.assertNumQueries(2, test_func)
class SaveRestoreWarningState(TestCase):
def test_save_restore_warnings_state(self):
"""
Ensure save_warnings_state/restore_warnings_state work correctly.
"""
# In reality this test could be satisfied by many broken implementations
# of save_warnings_state/restore_warnings_state (e.g. just
# warnings.resetwarnings()), but it is difficult to test more.
import warnings
self.save_warnings_state()
class MyWarning(Warning):
pass
# Add a filter that causes an exception to be thrown, so we can catch it
warnings.simplefilter("error", MyWarning)
self.assertRaises(Warning, lambda: warnings.warn("warn", MyWarning))
# Now restore.
self.restore_warnings_state()
# After restoring, we shouldn't get an exception. But we don't want a
# warning printed either, so we have to silence the warning.
warnings.simplefilter("ignore", MyWarning)
warnings.warn("warn", MyWarning)
# Remove the filter we just added.
self.restore_warnings_state()
__test__ = {"API_TEST": r"""
# Some checks of the doctest output normalizer.
# Standard doctests do fairly
>>> from django.utils import simplejson
>>> from django.utils.xmlutils import SimplerXMLGenerator
>>> from StringIO import StringIO
>>> def produce_long():
... return 42L
>>> def produce_int():
... return 42
>>> def produce_json():
... return simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2), 'whiz': 42}])
>>> def produce_xml():
... stream = StringIO()
... xml = SimplerXMLGenerator(stream, encoding='utf-8')
... xml.startDocument()
... xml.startElement("foo", {"aaa" : "1.0", "bbb": "2.0"})
... xml.startElement("bar", {"ccc" : "3.0"})
... xml.characters("Hello")
... xml.endElement("bar")
... xml.startElement("whiz", {})
... xml.characters("Goodbye")
... xml.endElement("whiz")
... xml.endElement("foo")
... xml.endDocument()
... return stream.getvalue()
>>> def produce_xml_fragment():
... stream = StringIO()
... xml = SimplerXMLGenerator(stream, encoding='utf-8')
... xml.startElement("foo", {"aaa": "1.0", "bbb": "2.0"})
... xml.characters("Hello")
... xml.endElement("foo")
... xml.startElement("bar", {"ccc": "3.0", "ddd": "4.0"})
... xml.endElement("bar")
... return stream.getvalue()
# Long values are normalized and are comparable to normal integers ...
>>> produce_long()
42
# ... and vice versa
>>> produce_int()
42L
# JSON output is normalized for field order, so it doesn't matter
# which order json dictionary attributes are listed in output
>>> produce_json()
'["foo", {"bar": ["baz", null, 1.0, 2], "whiz": 42}]'
>>> produce_json()
'["foo", {"whiz": 42, "bar": ["baz", null, 1.0, 2]}]'
# XML output is normalized for attribute order, so it doesn't matter
# which order XML element attributes are listed in output
>>> produce_xml()
'<?xml version="1.0" encoding="UTF-8"?>\n<foo aaa="1.0" bbb="2.0"><bar ccc="3.0">Hello</bar><whiz>Goodbye</whiz></foo>'
>>> produce_xml()
'<?xml version="1.0" encoding="UTF-8"?>\n<foo bbb="2.0" aaa="1.0"><bar ccc="3.0">Hello</bar><whiz>Goodbye</whiz></foo>'
>>> produce_xml_fragment()
'<foo aaa="1.0" bbb="2.0">Hello</foo><bar ccc="3.0" ddd="4.0"></bar>'
>>> produce_xml_fragment()
'<foo bbb="2.0" aaa="1.0">Hello</foo><bar ddd="4.0" ccc="3.0"></bar>'
"""}
| bsd-3-clause |
esakellari/my_root_for_test | interpreter/llvm/src/utils/lldbDataFormatters.py | 20 | 3286 | """
LLDB Formatters for LLVM data types.
Load into LLDB with 'command script import /path/to/lldbDataFormatters.py'
"""
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand('type category define -e llvm -l c++')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.SmallVectorSynthProvider '
'-x "^llvm::SmallVectorImpl<.+>$"')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.SmallVectorSynthProvider '
'-x "^llvm::SmallVector<.+,.+>$"')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.ArrayRefSynthProvider '
'-x "^llvm::ArrayRef<.+>$"')
# Pretty printer for llvm::SmallVector/llvm::SmallVectorImpl
class SmallVectorSynthProvider:
def __init__(self, valobj, dict):
self.valobj = valobj
self.update() # initialize this provider
def num_children(self):
begin = self.begin.GetValueAsUnsigned(0)
end = self.end.GetValueAsUnsigned(0)
return (end - begin)/self.type_size
def get_child_index(self, name):
try:
return int(name.lstrip('[').rstrip(']'))
except:
return -1
def get_child_at_index(self, index):
# Do bounds checking.
if index < 0:
return None
if index >= self.num_children():
return None
offset = index * self.type_size
return self.begin.CreateChildAtOffset('['+str(index)+']',
offset, self.data_type)
def update(self):
self.begin = self.valobj.GetChildMemberWithName('BeginX')
self.end = self.valobj.GetChildMemberWithName('EndX')
the_type = self.valobj.GetType()
# If this is a reference type we have to dereference it to get to the
# template parameter.
if the_type.IsReferenceType():
the_type = the_type.GetDereferencedType()
self.data_type = the_type.GetTemplateArgumentType(0)
self.type_size = self.data_type.GetByteSize()
assert self.type_size != 0
class ArrayRefSynthProvider:
""" Provider for llvm::ArrayRef """
def __init__(self, valobj, dict):
self.valobj = valobj
self.update() # initialize this provider
def num_children(self):
return self.length
def get_child_index(self, name):
try:
return int(name.lstrip('[').rstrip(']'))
except:
return -1
def get_child_at_index(self, index):
if index < 0 or index >= self.num_children():
return None
offset = index * self.type_size
return self.data.CreateChildAtOffset('[' + str(index) + ']',
offset, self.data_type)
def update(self):
self.data = self.valobj.GetChildMemberWithName('Data')
length_obj = self.valobj.GetChildMemberWithName('Length')
self.length = length_obj.GetValueAsUnsigned(0)
self.data_type = self.data.GetType().GetPointeeType()
self.type_size = self.data_type.GetByteSize()
assert self.type_size != 0
| lgpl-2.1 |
NukeAOSP/external_chromium | chrome/common/extensions/docs/build/build.py | 65 | 8905 | #!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Docbuilder for extension docs."""
import os
import os.path
import shutil
import sys
import time
import urllib
from subprocess import Popen, PIPE
from optparse import OptionParser
_script_path = os.path.realpath(__file__)
_build_dir = os.path.dirname(_script_path)
_base_dir = os.path.normpath(_build_dir + "/..")
_webkit_dir = _base_dir + "/../../../../third_party/WebKit"
_devtools_dir = _webkit_dir + "/Source/WebCore/inspector/front-end"
_static_dir = _base_dir + "/static"
_js_dir = _base_dir + "/js"
_template_dir = _base_dir + "/template"
_samples_dir = _base_dir + "/examples"
_extension_api_dir = os.path.normpath(_base_dir + "/../api")
_extension_api_json = _extension_api_dir + "/extension_api.json"
_devtools_api_json = _devtools_dir + "/ExtensionAPISchema.json"
_api_template_html = _template_dir + "/api_template.html"
_page_shell_html = _template_dir + "/page_shell.html"
_generator_html = _build_dir + "/generator.html"
_samples_json = _base_dir + "/samples.json"
_expected_output_preamble = "#BEGIN"
_expected_output_postamble = "#END"
# HACK! This is required because we can only depend on python 2.4 and
# the calling environment may not be set up to set the PYTHONPATH
sys.path.append(os.path.normpath(_base_dir +
"/../../../../third_party"))
import simplejson as json
from directory import Sample
from directory import ApiManifest
from directory import SamplesManifest
def RenderPages(names, dump_render_tree):
"""
Calls DumpRenderTree .../generator.html?<names> and writes the
results to .../docs/<name>.html
"""
if not names:
raise Exception("RenderPage called with empty names param")
generator_url = "file:" + urllib.pathname2url(_generator_html)
generator_url += "?" + ",".join(names)
# Start with a fresh copy of page shell for each file.
# Save the current contents so that we can look for changes later.
originals = {}
for name in names:
input_file = _base_dir + "/" + name + ".html"
if (os.path.isfile(input_file)):
originals[name] = open(input_file, 'rb').read()
os.remove(input_file)
else:
originals[name] = ""
shutil.copy(_page_shell_html, input_file)
# Run DumpRenderTree and capture result
dump_render_tree_timeout = 1000 * 60 * 5 # five minutes
p = Popen(
[dump_render_tree, "--test-shell",
"%s %s" % (generator_url, dump_render_tree_timeout)],
stdout=PIPE)
# The remaining output will be the content of the generated pages.
output = p.stdout.read()
# Parse out just the JSON part.
begin = output.find(_expected_output_preamble)
end = output.rfind(_expected_output_postamble)
if (begin < 0 or end < 0):
raise Exception("%s returned invalid output:\n\n%s" %
(dump_render_tree, output))
begin += len(_expected_output_preamble)
try:
output_parsed = json.loads(output[begin:end])
except ValueError, msg:
raise Exception("Could not parse DumpRenderTree output as JSON. Error: " +
msg + "\n\nOutput was:\n" + output)
changed_files = []
for name in names:
result = output_parsed[name].encode("utf8") + '\n'
# Remove CRs that appear in the captured DumpRenderTree output.
result = result.replace('\r', '')
# Remove page_shell
input_file = _base_dir + "/" + name + ".html"
os.remove(input_file)
# Write output
open(input_file, 'wb').write(result)
if (originals[name] and result != originals[name]):
changed_files.append(input_file)
return changed_files
def FindDumpRenderTree():
# This is hacky. It is used to guess the location of the DumpRenderTree executable.
chrome_dir = os.path.normpath(_base_dir + "/../../../")
src_dir = os.path.normpath(chrome_dir + "/../")
search_locations = []
if (sys.platform in ('cygwin', 'win32')):
home_dir = os.path.normpath(os.getenv("HOMEDRIVE") + os.getenv("HOMEPATH"))
search_locations.append(chrome_dir + "/Release/DumpRenderTree.exe")
search_locations.append(chrome_dir + "/Debug/DumpRenderTree.exe")
search_locations.append(home_dir + "/bin/DumpRenderTree/"
"DumpRenderTree.exe")
if (sys.platform in ('linux', 'linux2')):
search_locations.append(src_dir + "/sconsbuild/Release/DumpRenderTree")
search_locations.append(src_dir + "/out/Release/DumpRenderTree")
search_locations.append(src_dir + "/sconsbuild/Debug/DumpRenderTree")
search_locations.append(src_dir + "/out/Debug/DumpRenderTree")
search_locations.append(os.getenv("HOME") + "/bin/DumpRenderTree/"
"DumpRenderTree")
if (sys.platform == 'darwin'):
search_locations.append(src_dir +
"/xcodebuild/Release/DumpRenderTree.app/Contents/MacOS/DumpRenderTree")
search_locations.append(src_dir +
"/xcodebuild/Debug/DumpRenderTree.app/Contents/MacOS/DumpRenderTree")
search_locations.append(os.getenv("HOME") + "/bin/DumpRenderTree/" +
"DumpRenderTree.app/Contents/MacOS/DumpRenderTree")
for loc in search_locations:
if os.path.isfile(loc):
return loc
raise Exception("Could not find DumpRenderTree executable\n"
"**DumpRenderTree may need to be built**\n"
"Searched: \n" + "\n".join(search_locations) + "\n"
"To specify a path to DumpRenderTree use "
"--dump-render-tree-path")
def GetStaticFileNames():
static_files = os.listdir(_static_dir)
return set(os.path.splitext(file_name)[0]
for file_name in static_files
if file_name.endswith(".html") and not file_name.startswith("."))
def main():
# Prevent windows from using cygwin python.
if (sys.platform == "cygwin"):
sys.exit("Building docs not supported for cygwin python. Please run the "
"build.sh script instead, which uses depot_tools python.")
parser = OptionParser()
parser.add_option("--dump-render-tree-path", dest="dump_render_tree_path",
metavar="PATH",
help="path to DumpRenderTree executable")
parser.add_option("--page-name", dest="page_name", metavar="PAGE",
help="only generate docs for PAGE.html")
parser.add_option("--nozip", dest="zips", action="store_false",
help="do not generate zip files for samples",
default=True)
options, args = parser.parse_args()
if (options.dump_render_tree_path and
os.path.isfile(options.dump_render_tree_path)):
dump_render_tree = options.dump_render_tree_path
else:
dump_render_tree = FindDumpRenderTree()
# Load the manifest of existing API Methods
api_manifest = ApiManifest(_extension_api_json)
# DevTools API is maintained separately, in WebCore land
devtools_api_manifest = ApiManifest(_devtools_api_json)
# Read static file names
static_names = GetStaticFileNames()
# Read module names
module_names = (api_manifest.getModuleNames() |
devtools_api_manifest.getModuleNames())
# All pages to generate
page_names = static_names | module_names
# Allow the user to render a single page if they want
if options.page_name:
if options.page_name in page_names:
page_names = [options.page_name]
else:
raise Exception("--page-name argument must be one of %s." %
', '.join(sorted(page_names)))
# Render a manifest file containing metadata about all the extension samples
samples_manifest = SamplesManifest(_samples_dir, _base_dir, api_manifest)
samples_manifest.writeToFile(_samples_json)
# Write zipped versions of the samples listed in the manifest to the
# filesystem, unless the user has disabled it
if options.zips:
modified_zips = samples_manifest.writeZippedSamples()
else:
modified_zips = []
modified_files = RenderPages(page_names, dump_render_tree)
modified_files.extend(modified_zips)
if len(modified_files) == 0:
print "Output files match existing files. No changes made."
else:
print ("ATTENTION: EXTENSION DOCS HAVE CHANGED\n" +
"The following files have been modified and should be checked\n" +
"into source control (ideally in the same changelist as the\n" +
"underlying files that resulting in their changing).")
for f in modified_files:
print " * %s" % f
# Hack. Sleep here, otherwise windows doesn't properly close the debug.log
# and the os.remove will fail with a "Permission denied".
time.sleep(1)
debug_log = os.path.normpath(_build_dir + "/" + "debug.log")
if (os.path.isfile(debug_log)):
os.remove(debug_log)
if 'EX_OK' in dir(os):
return os.EX_OK
else:
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
yarothetimble/todo | todo/settings.py | 1 | 3215 | """
Django settings for todo project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3jjx0203l9=el9k%4x$jbw+y)q!+_l3=sd!l_d_a)mez1vb4uv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'tasks.apps.TasksConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'todo_db',
'USER': 'todo_usr',
'PASSWORD': 'todo_pw',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| mit |
walchko/pygecko | retired/old_version/original/bin/mjpeg_server.py | 1 | 4736 | #!/usr/bin/env python
import cv2
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
import time
import argparse
from opencvutils import Camera
import socket as Socket
# import errno
# threaded version
# http://stackoverflow.com/questions/12650238/processing-simultaneous-asynchronous-requests-with-python-basehttpserver
# not sure flask is any better:
# https://blog.miguelgrinberg.com/post/video-streaming-with-flask
#``mjpeg_server``
# * only handles one connection at a time ... make threaded?
# * sometimes the video stream is slow to load, but then it works fine
# * handle client disconnect (broken pipe - 32) better
def compress(orig, comp):
return float(orig) / float(comp)
class mjpgServer(BaseHTTPRequestHandler):
"""
A simple mjpeg server that either publishes images directly from a camera
or republishes images from another pygecko process.
"""
cam = None
cameratype = 'cv'
host = None
win = (640, 480)
def __del__(self):
if self.cam:
self.cam.close()
self.cam = None
print 'Exiting mjpgServer'
def setUpCamera(self):
"""
cv - camera number, usually 0
pi - set to True
"""
print 'window size:', self.win
if self.cameratype == 'pi':
self.cam = Camera('pi')
self.cam.init(win=self.win)
elif self.cameratype == 'cv':
self.cam = Camera('cv')
self.cam.init(cameraNumber='cv', win=self.win)
else:
raise Exception('Error, you must specify "cv" or "pi" for camera type')
time.sleep(3)
def do_GET(self):
print 'connection from:', self.address_string()
if self.path == '/mjpg':
print 'mjpg'
self.send_response(200)
self.send_header(
'Content-type',
'multipart/x-mixed-replace; boundary=--jpgboundary'
)
self.end_headers()
while True:
if self.cam:
# print 'grab image'
ret, img = self.cam.read()
else:
# print 'setupcamera()'
self.setUpCamera()
ret = False
# ret, img = self.getImage()
if not ret:
# print 'crap'
time.sleep(1)
continue
ret, jpg = cv2.imencode('.jpg', img)
# print 'Compression ratio: %d4.0:1'%(compress(img.size,jpg.size))
self.wfile.write("--jpgboundary")
self.send_header('Content-type', 'image/jpeg')
# self.send_header('Content-length',str(tmpFile.len))
self.send_header('Content-length', str(jpg.size))
self.end_headers()
self.wfile.write(jpg.tostring())
time.sleep(0.05)
elif self.path == '/':
ip = self.host[0]
port = self.host[1]
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head></head><body>')
self.wfile.write('<h1>{0!s}:{1!s}</h1>'.format(ip, port))
self.wfile.write('<img src="http://{}:{}/mjpg"/>'.format(ip, port))
self.wfile.write('<p>{0!s}</p>'.format((self.version_string())))
self.wfile.write('</p></ul>')
self.wfile.write('<p>This only handles one connection at a time</p>')
self.wfile.write('</body></html>')
else:
print 'error', self.path
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head></head><body>')
self.wfile.write('<h1>{0!s} not found</h1>'.format(self.path))
self.wfile.write('</body></html>')
def handleArgs():
parser = argparse.ArgumentParser(description='A simple mjpeg server Example: mjpeg-server -p 8080 --camera 4')
parser.add_argument('-p', '--port', help='local publisher port, default is 9000', type=int, default=9000)
# parser.add_argument('-c', '--camera', help='set opencv camera number, ex. -c 1', type=int, default=0)
parser.add_argument('-t', '--type', help='set type of camera: cv or pi, ex. -t pi', default='cv')
parser.add_argument('-s', '--size', help='set size', nargs=2, type=int, default=(640, 480))
# parser.add_argument('-r', '--remote', help='remote host image subscription info, hostname/ip port, ex: 1.2.3.4 9000', nargs=2, default=('0.0.0.0', 9000))
args = vars(parser.parse_args())
args['size'] = (args['size'][0], args['size'][1])
# args['remote'] = (args['remote'][0], args['remote'][1])
return args
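# Example invocations (illustrative; the flags map onto the parser above):
#   python mjpeg_server.py -p 8080 -t pi -s 320 240
#   python mjpeg_server.py --port 9000 --type cv --size 640 480
# The stream is then served at http://<hostname>:<port>/mjpg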
def main():
args = handleArgs()
# figure out host info
hostname = Socket.gethostname()
if hostname.find('.local') == -1:
hostname += '.local'
ip = Socket.gethostbyname(hostname)
hostinfo = (ip, args['port'])
try:
mjpgServer.topic = 'image_color'
mjpgServer.cameratype = args['type']  # honor the -t/--type flag rather than hard-coding 'pi'
mjpgServer.host = hostinfo
mjpgServer.win = args['size']
server = HTTPServer(hostinfo, mjpgServer)
print "server started on: {}:{}".format(ip, args['port'])
server.serve_forever()
except KeyboardInterrupt:
print 'KeyboardInterrupt'
server.socket.close()
exit(0)
if __name__ == '__main__':
main()
| mit |
fjbatresv/odoo | addons/membership/membership.py | 21 | 27956 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
STATE = [
('none', 'Non Member'),
('canceled', 'Cancelled Member'),
('old', 'Old Member'),
('waiting', 'Waiting Member'),
('invoiced', 'Invoiced Member'),
('free', 'Free Member'),
('paid', 'Paid Member'),
]
STATE_PRIOR = {
'none': 0,
'canceled': 1,
'old': 2,
'waiting': 3,
'invoiced': 4,
'free': 6,
'paid': 7
}
class membership_line(osv.osv):
'''Member line'''
def _get_partners(self, cr, uid, ids, context=None):
list_membership_line = []
member_line_obj = self.pool.get('membership.membership_line')
for partner in self.pool.get('res.partner').browse(cr, uid, ids, context=context):
if partner.member_lines:
list_membership_line += member_line_obj.search(cr, uid, [('id', 'in', [ l.id for l in partner.member_lines])], context=context)
return list_membership_line
def _get_membership_lines(self, cr, uid, ids, context=None):
list_membership_line = []
member_line_obj = self.pool.get('membership.membership_line')
for invoice in self.pool.get('account.invoice').browse(cr, uid, ids, context=context):
if invoice.invoice_line:
list_membership_line += member_line_obj.search(cr, uid, [('account_invoice_line', 'in', [ l.id for l in invoice.invoice_line])], context=context)
return list_membership_line
def _check_membership_date(self, cr, uid, ids, context=None):
"""Check if membership product is not in the past
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Membership Line IDs
@param context: A standard dictionary for contextual values
"""
cr.execute('''
SELECT MIN(ml.date_to - ai.date_invoice)
FROM membership_membership_line ml
JOIN account_invoice_line ail ON (
ml.account_invoice_line = ail.id
)
JOIN account_invoice ai ON (
ai.id = ail.invoice_id)
WHERE ml.id IN %s''', (tuple(ids),))
res = cr.fetchall()
for r in res:
if r[0] and r[0] < 0:
return False
return True
def _state(self, cr, uid, ids, name, args, context=None):
"""Compute the state lines
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Membership Line IDs
@param name: Field Name
@param context: A standard dictionary for contextual values
@param return: Dictionary of state Value
"""
res = {}
inv_obj = self.pool.get('account.invoice')
for line in self.browse(cr, uid, ids, context=context):
cr.execute('''
SELECT i.state, i.id FROM
account_invoice i
WHERE
i.id = (
SELECT l.invoice_id FROM
account_invoice_line l WHERE
l.id = (
SELECT ml.account_invoice_line FROM
membership_membership_line ml WHERE
ml.id = %s
)
)
''', (line.id,))
fetched = cr.fetchone()
if not fetched:
res[line.id] = 'canceled'
continue
istate = fetched[0]
state = 'none'
if (istate == 'draft') | (istate == 'proforma'):
state = 'waiting'
elif istate == 'open':
state = 'invoiced'
elif istate == 'paid':
state = 'paid'
inv = inv_obj.browse(cr, uid, fetched[1], context=context)
for payment in inv.payment_ids:
if payment.invoice and payment.invoice.type == 'out_refund':
state = 'canceled'
elif istate == 'cancel':
state = 'canceled'
res[line.id] = state
return res
_description = __doc__
_name = 'membership.membership_line'
_columns = {
'partner': fields.many2one('res.partner', 'Partner', ondelete='cascade', select=1),
'membership_id': fields.many2one('product.product', string="Membership", required=True),
'date_from': fields.date('From', readonly=True),
'date_to': fields.date('To', readonly=True),
'date_cancel': fields.date('Cancel date'),
'date': fields.date('Join Date', help="Date on which member has joined the membership"),
'member_price': fields.float('Membership Fee', digits_compute= dp.get_precision('Product Price'), required=True, help='Amount for the membership'),
'account_invoice_line': fields.many2one('account.invoice.line', 'Account Invoice line', readonly=True),
'account_invoice_id': fields.related('account_invoice_line', 'invoice_id', type='many2one', relation='account.invoice', string='Invoice', readonly=True),
'state': fields.function(_state,
string='Membership Status', type='selection',
selection=STATE, store = {
'account.invoice': (_get_membership_lines, ['state'], 10),
'res.partner': (_get_partners, ['membership_state'], 12),
}, help="""It indicates the membership status.
-Non Member: A member who has not applied for any membership.
-Cancelled Member: A member who has cancelled his membership.
-Old Member: A member whose membership date has expired.
-Waiting Member: A member who has applied for the membership and whose invoice is going to be created.
-Invoiced Member: A member whose invoice has been created.
-Paid Member: A member who has paid the membership amount."""),
'company_id': fields.related('account_invoice_line', 'invoice_id', 'company_id', type="many2one", relation="res.company", string="Company", readonly=True, store=True)
}
_rec_name = 'partner'
_order = 'id desc'
_constraints = [
(_check_membership_date, 'Error, this membership product is out of date', [])
]
class Partner(osv.osv):
'''Partner'''
_inherit = 'res.partner'
def _get_partner_id(self, cr, uid, ids, context=None):
member_line_obj = self.pool.get('membership.membership_line')
res_obj = self.pool.get('res.partner')
data_inv = member_line_obj.browse(cr, uid, ids, context=context)
list_partner = []
for data in data_inv:
list_partner.append(data.partner.id)
ids2 = list_partner
while ids2:
ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
list_partner += ids2
return list_partner
def _get_invoice_partner(self, cr, uid, ids, context=None):
inv_obj = self.pool.get('account.invoice')
res_obj = self.pool.get('res.partner')
data_inv = inv_obj.browse(cr, uid, ids, context=context)
list_partner = []
for data in data_inv:
list_partner.append(data.partner_id.id)
ids2 = list_partner
while ids2:
ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
list_partner += ids2
return list_partner
def _cron_update_membership(self, cr, uid, context=None):
partner_ids = self.search(cr, uid, [('membership_state', '=', 'paid')], context=context)
if partner_ids:
self._store_set_values(cr, uid, partner_ids, ['membership_state'], context=context)
def _membership_state(self, cr, uid, ids, name, args, context=None):
"""This Function return Membership State For Given Partner.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Partner IDs
@param name: Field Name
@param context: A standard dictionary for contextual values
@return: Dictionary of membership state values
"""
res = {}
for id in ids:
res[id] = 'none'
today = time.strftime('%Y-%m-%d')
for id in ids:
partner_data = self.browse(cr, uid, id, context=context)
if partner_data.membership_cancel and today > partner_data.membership_cancel:
res[id] = 'free' if partner_data.free_member else 'canceled'
continue
if partner_data.membership_stop and today > partner_data.membership_stop:
res[id] = 'free' if partner_data.free_member else 'old'
continue
s = 4
if partner_data.member_lines:
for mline in partner_data.member_lines:
if mline.date_to >= today and mline.date_from < today:
if mline.account_invoice_line and mline.account_invoice_line.invoice_id:
mstate = mline.account_invoice_line.invoice_id.state
if mstate == 'paid':
s = 0
inv = mline.account_invoice_line.invoice_id
for payment in inv.payment_ids:
if payment.invoice.type == 'out_refund':
s = 2
break
elif mstate == 'open' and s!=0:
s = 1
elif mstate == 'cancel' and s!=0 and s!=1:
s = 2
elif (mstate == 'draft' or mstate == 'proforma') and s!=0 and s!=1:
s = 3
if s==4:
for mline in partner_data.member_lines:
if mline.date_from < today and mline.date_to < today and mline.date_from <= mline.date_to and (mline.account_invoice_line and mline.account_invoice_line.invoice_id.state) == 'paid':
s = 5
else:
s = 6
if s==0:
res[id] = 'paid'
elif s==1:
res[id] = 'invoiced'
elif s==2:
res[id] = 'canceled'
elif s==3:
res[id] = 'waiting'
elif s==5:
res[id] = 'old'
elif s==6:
res[id] = 'none'
if partner_data.free_member and s!=0:
res[id] = 'free'
if partner_data.associate_member:
res_state = self._membership_state(cr, uid, [partner_data.associate_member.id], name, args, context=context)
res[id] = res_state[partner_data.associate_member.id]
return res
def _membership_date(self, cr, uid, ids, name, args, context=None):
"""Return date of membership"""
name = name[0]
res = {}
member_line_obj = self.pool.get('membership.membership_line')
for partner in self.browse(cr, uid, ids, context=context):
if partner.associate_member:
partner_id = partner.associate_member.id
else:
partner_id = partner.id
res[partner.id] = {
'membership_start': False,
'membership_stop': False,
'membership_cancel': False
}
if name == 'membership_start':
line_id = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)],
limit=1, order='date_from', context=context)
if line_id:
res[partner.id]['membership_start'] = member_line_obj.read(cr, uid, [line_id[0]],
['date_from'], context=context)[0]['date_from']
if name == 'membership_stop':
line_id1 = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)],
limit=1, order='date_to desc', context=context)
if line_id1:
res[partner.id]['membership_stop'] = member_line_obj.read(cr, uid, [line_id1[0]],
['date_to'], context=context)[0]['date_to']
if name == 'membership_cancel':
if partner.membership_state == 'canceled':
line_id2 = member_line_obj.search(cr, uid, [('partner', '=', partner.id)], limit=1, order='date_cancel', context=context)
if line_id2:
res[partner.id]['membership_cancel'] = member_line_obj.read(cr, uid, [line_id2[0]], ['date_cancel'], context=context)[0]['date_cancel']
return res
def _get_partners(self, cr, uid, ids, context=None):
ids2 = ids
while ids2:
ids2 = self.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
ids += ids2
return ids
def __get_membership_state(self, *args, **kwargs):
return self._membership_state(*args, **kwargs)
_columns = {
'associate_member': fields.many2one('res.partner', 'Associate Member',help="A member with whom you want to associate your membership.It will consider the membership state of the associated member."),
'member_lines': fields.one2many('membership.membership_line', 'partner', 'Membership'),
'free_member': fields.boolean('Free Member', help = "Select if you want to give free membership."),
'membership_amount': fields.float(
'Membership Amount', digits=(16, 2),
help = 'The price negotiated by the partner'),
'membership_state': fields.function(
__get_membership_state,
string = 'Current Membership Status', type = 'selection',
selection = STATE,
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help='It indicates the membership state.\n'
'-Non Member: A partner who has not applied for any membership.\n'
'-Cancelled Member: A member who has cancelled his membership.\n'
'-Old Member: A member whose membership date has expired.\n'
'-Waiting Member: A member who has applied for the membership and whose invoice is going to be created.\n'
'-Invoiced Member: A member whose invoice has been created.\n'
'-Paid Member: A member who has paid the membership fee.'),
'membership_start': fields.function(
_membership_date, multi = 'membeship_start',
string = 'Membership Start Date', type = 'date',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10, ),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date from which membership becomes active."),
'membership_stop': fields.function(
_membership_date,
string = 'Membership End Date', type='date', multi='membership_stop',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date until which membership remains active."),
'membership_cancel': fields.function(
_membership_date,
string = 'Cancel Membership Date', type='date', multi='membership_cancel',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 11),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date on which membership has been cancelled"),
}
_defaults = {
'free_member': False,
'membership_cancel': False,
}
def _check_recursion(self, cr, uid, ids, context=None):
"""Check Recursive for Associated Members.
"""
level = 100
while len(ids):
cr.execute('SELECT DISTINCT associate_member FROM res_partner WHERE id IN %s', (tuple(ids),))
ids = filter(None, map(lambda x:x[0], cr.fetchall()))
if not level:
return False
level -= 1
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive associated members.', ['associate_member'])
]
def create_membership_invoice(self, cr, uid, ids, product_id=None, datas=None, context=None):
""" Create Customer Invoice of Membership for partners.
@param datas: a dictionary holding the id of the membership product and the cost amount of the membership, e.g.
datas = {'membership_product_id': None, 'amount': None}
"""
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_tax_obj = self.pool.get('account.invoice.tax')
product_id = product_id or datas.get('membership_product_id', False)
amount = datas.get('amount', 0.0)
invoice_list = []
if type(ids) in (int, long,):
ids = [ids]
for partner in self.browse(cr, uid, ids, context=context):
account_id = partner.property_account_receivable and partner.property_account_receivable.id or False
fpos_id = partner.property_account_position and partner.property_account_position.id or False
addr = self.address_get(cr, uid, [partner.id], ['invoice'])
if partner.free_member:
raise osv.except_osv(_('Error!'),
_("Partner is a free Member."))
if not addr.get('invoice', False):
raise osv.except_osv(_('Error!'),
_("Partner doesn't have an address to make the invoice."))
quantity = 1
line_value = {
'product_id': product_id,
}
line_dict = invoice_line_obj.product_id_change(cr, uid, {},
product_id, False, quantity, '', 'out_invoice', partner.id, fpos_id, price_unit=amount, context=context)
line_value.update(line_dict['value'])
line_value['price_unit'] = amount
if line_value.get('invoice_line_tax_id', False):
tax_tab = [(6, 0, line_value['invoice_line_tax_id'])]
line_value['invoice_line_tax_id'] = tax_tab
invoice_id = invoice_obj.create(cr, uid, {
'partner_id': partner.id,
'account_id': account_id,
'fiscal_position': fpos_id or False
}, context=context)
line_value['invoice_id'] = invoice_id
invoice_line_obj.create(cr, uid, line_value, context=context)
invoice_list.append(invoice_id)
if line_value['invoice_line_tax_id']:
tax_value = invoice_tax_obj.compute(cr, uid, invoice_id).values()
for tax in tax_value:
invoice_tax_obj.create(cr, uid, tax, context=context)
#recompute the membership_state of those partners
self.pool.get('res.partner').write(cr, uid, ids, {})
return invoice_list
class Product(osv.osv):
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
model_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
if ('product' in context) and (context['product']=='membership_product'):
model_data_ids_form = model_obj.search(cr, user, [('model','=','ir.ui.view'), ('name', 'in', ['membership_products_form', 'membership_products_tree'])], context=context)
resource_id_form = model_obj.read(cr, user, model_data_ids_form, fields=['res_id', 'name'], context=context)
dict_model = {}
for i in resource_id_form:
dict_model[i['name']] = i['res_id']
if view_type == 'form':
view_id = dict_model['membership_products_form']
else:
view_id = dict_model['membership_products_tree']
return super(Product,self).fields_view_get(cr, user, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
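    # The view switch above only applies when the caller passes
    # context={'product': 'membership_product'}; any other context falls through
    # to the standard product.template form/tree views.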
'''Product'''
_inherit = 'product.template'
_columns = {
'membership': fields.boolean('Membership', help='Check if the product is eligible for membership.'),
'membership_date_from': fields.date('Membership Start Date', help='Date from which membership becomes active.'),
'membership_date_to': fields.date('Membership End Date', help='Date until which membership remains active.'),
}
_sql_constraints = [('membership_date_greater','check(membership_date_to >= membership_date_from)','Error ! Ending Date cannot be set before Beginning Date.')]
_defaults = {
'membership': False,
}
class Invoice(osv.osv):
'''Invoice'''
_inherit = 'account.invoice'
def action_cancel(self, cr, uid, ids, context=None):
'''Create a 'date_cancel' on the membership_line object'''
member_line_obj = self.pool.get('membership.membership_line')
today = time.strftime('%Y-%m-%d')
for invoice in self.browse(cr, uid, ids, context=context):
mlines = member_line_obj.search(cr, uid,
[('account_invoice_line', 'in',
[l.id for l in invoice.invoice_line])])
member_line_obj.write(cr, uid, mlines, {'date_cancel': today})
return super(Invoice, self).action_cancel(cr, uid, ids, context=context)
class account_invoice_line(osv.osv):
_inherit='account.invoice.line'
def write(self, cr, uid, ids, vals, context=None):
"""Overrides orm write method
"""
member_line_obj = self.pool.get('membership.membership_line')
res = super(account_invoice_line, self).write(cr, uid, ids, vals, context=context)
for line in self.browse(cr, uid, ids, context=context):
if line.invoice_id.type == 'out_invoice':
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context)
if line.product_id and line.product_id.membership and not ml_ids:
# Product line has changed to a membership product
date_from = line.product_id.membership_date_from
date_to = line.product_id.membership_date_to
if line.invoice_id.date_invoice > date_from and line.invoice_id.date_invoice < date_to:
date_from = line.invoice_id.date_invoice
member_line_obj.create(cr, uid, {
'partner': line.invoice_id.partner_id.id,
'membership_id': line.product_id.id,
'member_price': line.price_unit,
'date': time.strftime('%Y-%m-%d'),
'date_from': date_from,
'date_to': date_to,
'account_invoice_line': line.id,
}, context=context)
if line.product_id and not line.product_id.membership and ml_ids:
# Product line has changed to a non membership product
member_line_obj.unlink(cr, uid, ml_ids, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
"""Remove Membership Line Record for Account Invoice Line
"""
member_line_obj = self.pool.get('membership.membership_line')
for id in ids:
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', id)], context=context)
member_line_obj.unlink(cr, uid, ml_ids, context=context)
return super(account_invoice_line, self).unlink(cr, uid, ids, context=context)
def create(self, cr, uid, vals, context=None):
"""Overrides orm create method
"""
member_line_obj = self.pool.get('membership.membership_line')
result = super(account_invoice_line, self).create(cr, uid, vals, context=context)
line = self.browse(cr, uid, result, context=context)
if line.invoice_id.type == 'out_invoice':
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context)
if line.product_id and line.product_id.membership and not ml_ids:
# Product line is a membership product
date_from = line.product_id.membership_date_from
date_to = line.product_id.membership_date_to
if line.invoice_id.date_invoice > date_from and line.invoice_id.date_invoice < date_to:
date_from = line.invoice_id.date_invoice
member_line_obj.create(cr, uid, {
'partner': line.invoice_id.partner_id and line.invoice_id.partner_id.id or False,
'membership_id': line.product_id.id,
'member_price': line.price_unit,
'date': time.strftime('%Y-%m-%d'),
'date_from': date_from,
'date_to': date_to,
'account_invoice_line': line.id,
}, context=context)
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
XiaosongWei/crosswalk-test-suite | apptools/apptools-android-tests/apptools/create_basic.py | 3 | 2943 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<[email protected]>
# Yun, Liu<[email protected]>
import unittest
import os
import comm
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_dir_exist(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-app create org.xwalk.test --android-crosswalk=" + \
comm.crosswalkVersion
return_code = os.system(cmd)
self.assertNotEquals(return_code, 0)
comm.clear("org.xwalk.test")
def test_main_activity(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test/prj/android')
fp = open(os.getcwd() + '/AndroidManifest.xml')
lines = fp.readlines()
for i in range(len(lines)):
line = lines[i].strip(' ').strip('\n\t')
findLine = "<activity"
            if i < len(lines) - 1:
if findLine in line:
print "Find"
start = line.index("name")
self.assertIn('MainActivity', line[start:])
break
else:
print "Continue find"
else:
self.assertIn(findLine, line)
fp.close()
comm.clear("org.xwalk.test")
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
williamthegrey/swift | test/unit/common/middleware/test_xprofile.py | 5 | 23334 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import shutil
import tempfile
import unittest
from nose import SkipTest
from six import BytesIO
from swift import gettext_ as _
from swift.common.swob import Request, Response
try:
from swift.common.middleware import xprofile
from swift.common.middleware.xprofile import ProfileMiddleware
from swift.common.middleware.x_profile.exceptions import (
MethodNotAllowed, NotFoundException, ODFLIBNotInstalled,
PLOTLIBNotInstalled)
from swift.common.middleware.x_profile.html_viewer import (
HTMLViewer, PLOTLIB_INSTALLED)
from swift.common.middleware.x_profile.profile_model import (
ODFLIB_INSTALLED, ProfileLog, Stats2)
except ImportError:
xprofile = None
class FakeApp(object):
def __call__(self, env, start_response):
req = Request(env)
return Response(request=req, body='FAKE APP')(
env, start_response)
class TestXProfile(unittest.TestCase):
def test_get_profiler(self):
if xprofile is None:
raise SkipTest
self.assertTrue(xprofile.get_profiler('cProfile') is not None)
self.assertTrue(xprofile.get_profiler('eventlet.green.profile')
is not None)
class TestProfilers(unittest.TestCase):
def setUp(self):
if xprofile is None:
raise SkipTest
self.profilers = [xprofile.get_profiler('cProfile'),
xprofile.get_profiler('eventlet.green.profile')]
def fake_func(self, *args, **kw):
return len(args) + len(kw)
def test_runcall(self):
for p in self.profilers:
v = p.runcall(self.fake_func, 'one', 'two', {'key1': 'value1'})
self.assertEqual(v, 3)
def test_runctx(self):
for p in self.profilers:
p.runctx('import os;os.getcwd();', globals(), locals())
p.snapshot_stats()
self.assertTrue(p.stats is not None)
self.assertTrue(len(p.stats.keys()) > 0)
class TestProfileMiddleware(unittest.TestCase):
def setUp(self):
if xprofile is None:
raise SkipTest
self.got_statuses = []
self.app = ProfileMiddleware(FakeApp, {})
self.tempdir = os.path.dirname(self.app.log_filename_prefix)
self.pids = ['123', '456', str(os.getpid())]
profiler = xprofile.get_profiler('eventlet.green.profile')
for pid in self.pids:
path = self.app.log_filename_prefix + pid
profiler.runctx('import os;os.getcwd();', globals(), locals())
profiler.dump_stats(path)
profiler.runctx('import os;os.getcwd();', globals(), locals())
profiler.dump_stats(path + '.tmp')
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def get_app(self, app, global_conf, **local_conf):
factory = xprofile.filter_factory(global_conf, **local_conf)
return factory(app)
def start_response(self, status, headers):
self.got_statuses = [status]
self.headers = headers
def test_combine_body_qs(self):
body = (b"profile=all&sort=time&limit=-1&fulldirs=1"
b"&nfl_filter=__call__&query=query&metric=nc&format=default")
wsgi_input = BytesIO(body)
environ = {'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'profile=all&format=json',
'wsgi.input': wsgi_input}
req = Request.blank('/__profile__/', environ=environ)
query_dict = self.app._combine_body_qs(req)
self.assertEqual(query_dict['profile'], ['all'])
self.assertEqual(query_dict['sort'], ['time'])
self.assertEqual(query_dict['limit'], ['-1'])
self.assertEqual(query_dict['fulldirs'], ['1'])
self.assertEqual(query_dict['nfl_filter'], ['__call__'])
self.assertEqual(query_dict['query'], ['query'])
self.assertEqual(query_dict['metric'], ['nc'])
self.assertEqual(query_dict['format'], ['default'])
def test_call(self):
body = b"sort=time&limit=-1&fulldirs=1&nfl_filter=&metric=nc"
wsgi_input = BytesIO(body + b'&query=query')
environ = {'HTTP_HOST': 'localhost:8080',
'PATH_INFO': '/__profile__',
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'profile=all&format=json',
'wsgi.input': wsgi_input}
resp = self.app(environ, self.start_response)
self.assertTrue(resp[0].find('<html>') > 0, resp)
self.assertEqual(self.got_statuses, ['200 OK'])
self.assertEqual(self.headers, [('content-type', 'text/html')])
wsgi_input = BytesIO(body + b'&plot=plot')
environ['wsgi.input'] = wsgi_input
if PLOTLIB_INSTALLED:
resp = self.app(environ, self.start_response)
self.assertEqual(self.got_statuses, ['200 OK'])
self.assertEqual(self.headers, [('content-type', 'image/jpg')])
else:
resp = self.app(environ, self.start_response)
self.assertEqual(self.got_statuses, ['500 Internal Server Error'])
        wsgi_input = BytesIO(body + b'&download=download&format=default')
environ['wsgi.input'] = wsgi_input
resp = self.app(environ, self.start_response)
self.assertEqual(self.headers, [('content-type',
HTMLViewer.format_dict['default'])])
        wsgi_input = BytesIO(body + b'&download=download&format=json')
environ['wsgi.input'] = wsgi_input
resp = self.app(environ, self.start_response)
self.assertTrue(self.headers == [('content-type',
HTMLViewer.format_dict['json'])])
env2 = environ.copy()
env2['REQUEST_METHOD'] = 'DELETE'
resp = self.app(env2, self.start_response)
self.assertEqual(self.got_statuses, ['405 Method Not Allowed'], resp)
# use a totally bogus profile identifier
wsgi_input = BytesIO(body + b'&profile=ABC&download=download')
environ['wsgi.input'] = wsgi_input
resp = self.app(environ, self.start_response)
self.assertEqual(self.got_statuses, ['404 Not Found'], resp)
wsgi_input = BytesIO(body + b'&download=download&format=ods')
environ['wsgi.input'] = wsgi_input
resp = self.app(environ, self.start_response)
if ODFLIB_INSTALLED:
self.assertEqual(self.headers, [('content-type',
HTMLViewer.format_dict['ods'])])
else:
self.assertEqual(self.got_statuses, ['500 Internal Server Error'])
def test_dump_checkpoint(self):
self.app.dump_checkpoint()
self.assertTrue(self.app.last_dump_at is not None)
def test_renew_profile(self):
old_profiler = self.app.profiler
self.app.renew_profile()
new_profiler = self.app.profiler
self.assertTrue(old_profiler != new_profiler)
class Test_profile_log(unittest.TestCase):
def setUp(self):
if xprofile is None:
raise SkipTest
self.dir1 = tempfile.mkdtemp()
self.log_filename_prefix1 = self.dir1 + '/unittest.profile'
self.profile_log1 = ProfileLog(self.log_filename_prefix1, False)
self.pids1 = ['123', '456', str(os.getpid())]
profiler1 = xprofile.get_profiler('eventlet.green.profile')
for pid in self.pids1:
profiler1.runctx('import os;os.getcwd();', globals(), locals())
self.profile_log1.dump_profile(profiler1, pid)
self.dir2 = tempfile.mkdtemp()
self.log_filename_prefix2 = self.dir2 + '/unittest.profile'
self.profile_log2 = ProfileLog(self.log_filename_prefix2, True)
self.pids2 = ['321', '654', str(os.getpid())]
profiler2 = xprofile.get_profiler('eventlet.green.profile')
for pid in self.pids2:
profiler2.runctx('import os;os.getcwd();', globals(), locals())
self.profile_log2.dump_profile(profiler2, pid)
def tearDown(self):
self.profile_log1.clear('all')
self.profile_log2.clear('all')
shutil.rmtree(self.dir1, ignore_errors=True)
shutil.rmtree(self.dir2, ignore_errors=True)
def test_get_all_pids(self):
self.assertEqual(self.profile_log1.get_all_pids(),
sorted(self.pids1, reverse=True))
for pid in self.profile_log2.get_all_pids():
self.assertTrue(pid.split('-')[0] in self.pids2)
def test_clear(self):
self.profile_log1.clear('123')
self.assertFalse(os.path.exists(self.log_filename_prefix1 + '123'))
self.profile_log1.clear('current')
self.assertFalse(os.path.exists(self.log_filename_prefix1 +
str(os.getpid())))
self.profile_log1.clear('all')
for pid in self.pids1:
self.assertFalse(os.path.exists(self.log_filename_prefix1 + pid))
self.profile_log2.clear('321')
self.assertFalse(os.path.exists(self.log_filename_prefix2 + '321'))
self.profile_log2.clear('current')
self.assertFalse(os.path.exists(self.log_filename_prefix2 +
str(os.getpid())))
self.profile_log2.clear('all')
for pid in self.pids2:
self.assertFalse(os.path.exists(self.log_filename_prefix2 + pid))
def test_get_logfiles(self):
log_files = self.profile_log1.get_logfiles('all')
self.assertEqual(len(log_files), 3)
self.assertEqual(len(log_files), len(self.pids1))
log_files = self.profile_log1.get_logfiles('current')
self.assertEqual(len(log_files), 1)
self.assertEqual(log_files, [self.log_filename_prefix1
+ str(os.getpid())])
log_files = self.profile_log1.get_logfiles(self.pids1[0])
self.assertEqual(len(log_files), 1)
self.assertEqual(log_files, [self.log_filename_prefix1
+ self.pids1[0]])
log_files = self.profile_log2.get_logfiles('all')
self.assertEqual(len(log_files), 3)
self.assertEqual(len(log_files), len(self.pids2))
log_files = self.profile_log2.get_logfiles('current')
self.assertEqual(len(log_files), 1)
self.assertTrue(log_files[0].find(self.log_filename_prefix2 +
str(os.getpid())) > -1)
log_files = self.profile_log2.get_logfiles(self.pids2[0])
self.assertEqual(len(log_files), 1)
self.assertTrue(log_files[0].find(self.log_filename_prefix2 +
self.pids2[0]) > -1)
def test_dump_profile(self):
prof = xprofile.get_profiler('eventlet.green.profile')
prof.runctx('import os;os.getcwd();', globals(), locals())
prof.create_stats()
pfn = self.profile_log1.dump_profile(prof, os.getpid())
self.assertTrue(os.path.exists(pfn))
os.remove(pfn)
pfn = self.profile_log2.dump_profile(prof, os.getpid())
self.assertTrue(os.path.exists(pfn))
os.remove(pfn)
class Test_html_viewer(unittest.TestCase):
def setUp(self):
if xprofile is None:
raise SkipTest
self.app = ProfileMiddleware(FakeApp, {})
self.log_files = []
self.tempdir = tempfile.mkdtemp()
self.log_filename_prefix = self.tempdir + '/unittest.profile'
self.profile_log = ProfileLog(self.log_filename_prefix, False)
self.pids = ['123', '456', str(os.getpid())]
profiler = xprofile.get_profiler('eventlet.green.profile')
for pid in self.pids:
profiler.runctx('import os;os.getcwd();', globals(), locals())
self.log_files.append(self.profile_log.dump_profile(profiler, pid))
self.viewer = HTMLViewer('__profile__', 'eventlet.green.profile',
self.profile_log)
body = (b"profile=123&profile=456&sort=time&sort=nc&limit=10"
b"&fulldirs=1&nfl_filter=getcwd&query=query&metric=nc")
wsgi_input = BytesIO(body)
environ = {'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'profile=all',
'wsgi.input': wsgi_input}
req = Request.blank('/__profile__/', environ=environ)
self.query_dict = self.app._combine_body_qs(req)
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def fake_call_back(self):
pass
def test_get_param(self):
query_dict = self.query_dict
get_param = self.viewer._get_param
self.assertEqual(get_param(query_dict, 'profile', 'current', True),
['123', '456'])
self.assertEqual(get_param(query_dict, 'profile', 'current'), '123')
self.assertEqual(get_param(query_dict, 'sort', 'time'), 'time')
self.assertEqual(get_param(query_dict, 'sort', 'time', True),
['time', 'nc'])
self.assertEqual(get_param(query_dict, 'limit', -1), 10)
self.assertEqual(get_param(query_dict, 'fulldirs', '0'), '1')
self.assertEqual(get_param(query_dict, 'nfl_filter', ''), 'getcwd')
self.assertEqual(get_param(query_dict, 'query', ''), 'query')
self.assertEqual(get_param(query_dict, 'metric', 'time'), 'nc')
self.assertEqual(get_param(query_dict, 'format', 'default'), 'default')
def test_render(self):
url = 'http://localhost:8080/__profile__'
path_entries = ['/__profile__'.split('/'),
'/__profile__/'.split('/'),
'/__profile__/123'.split('/'),
'/__profile__/123/'.split('/'),
'/__profile__/123/:0(getcwd)'.split('/'),
'/__profile__/all'.split('/'),
'/__profile__/all/'.split('/'),
'/__profile__/all/:0(getcwd)'.split('/'),
'/__profile__/current'.split('/'),
'/__profile__/current/'.split('/'),
'/__profile__/current/:0(getcwd)'.split('/')]
content, headers = self.viewer.render(url, 'GET', path_entries[0],
self.query_dict, None)
self.assertTrue(content is not None)
self.assertEqual(headers, [('content-type', 'text/html')])
content, headers = self.viewer.render(url, 'POST', path_entries[0],
self.query_dict, None)
self.assertTrue(content is not None)
self.assertEqual(headers, [('content-type', 'text/html')])
plot_dict = self.query_dict.copy()
plot_dict['plot'] = ['plot']
if PLOTLIB_INSTALLED:
content, headers = self.viewer.render(url, 'POST', path_entries[0],
plot_dict, None)
self.assertEqual(headers, [('content-type', 'image/jpg')])
else:
self.assertRaises(PLOTLIBNotInstalled, self.viewer.render,
url, 'POST', path_entries[0], plot_dict, None)
clear_dict = self.query_dict.copy()
clear_dict['clear'] = ['clear']
del clear_dict['query']
clear_dict['profile'] = ['xxx']
content, headers = self.viewer.render(url, 'POST', path_entries[0],
clear_dict, None)
self.assertEqual(headers, [('content-type', 'text/html')])
download_dict = self.query_dict.copy()
download_dict['download'] = ['download']
content, headers = self.viewer.render(url, 'POST', path_entries[0],
download_dict, None)
self.assertTrue(headers == [('content-type',
self.viewer.format_dict['default'])])
content, headers = self.viewer.render(url, 'GET', path_entries[1],
self.query_dict, None)
self.assertTrue(isinstance(json.loads(content), dict))
for method in ['HEAD', 'PUT', 'DELETE', 'XYZMethod']:
self.assertRaises(MethodNotAllowed, self.viewer.render, url,
method, path_entries[10], self.query_dict, None)
for entry in path_entries[2:]:
download_dict['format'] = 'default'
content, headers = self.viewer.render(url, 'GET', entry,
download_dict, None)
self.assertTrue(
('content-type', self.viewer.format_dict['default'])
in headers, entry)
download_dict['format'] = 'json'
content, headers = self.viewer.render(url, 'GET', entry,
download_dict, None)
self.assertTrue(isinstance(json.loads(content), dict))
def test_index(self):
content, headers = self.viewer.index_page(self.log_files[0:1],
profile_id='current')
self.assertTrue(content.find('<html>') > -1)
self.assertTrue(headers == [('content-type', 'text/html')])
def test_index_all(self):
content, headers = self.viewer.index_page(self.log_files,
profile_id='all')
for f in self.log_files:
self.assertTrue(content.find(f) > 0, content)
self.assertTrue(headers == [('content-type', 'text/html')])
def test_download(self):
content, headers = self.viewer.download(self.log_files)
self.assertTrue(content is not None)
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['default'])])
content, headers = self.viewer.download(self.log_files, sort='calls',
limit=10, nfl_filter='os')
self.assertTrue(content is not None)
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['default'])])
content, headers = self.viewer.download(self.log_files,
output_format='default')
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['default'])])
content, headers = self.viewer.download(self.log_files,
output_format='json')
self.assertTrue(isinstance(json.loads(content), dict))
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['json'])])
content, headers = self.viewer.download(self.log_files,
output_format='csv')
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['csv'])])
if ODFLIB_INSTALLED:
content, headers = self.viewer.download(self.log_files,
output_format='ods')
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['ods'])])
else:
self.assertRaises(ODFLIBNotInstalled, self.viewer.download,
self.log_files, output_format='ods')
content, headers = self.viewer.download(self.log_files,
nfl_filter=__file__,
output_format='python')
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['python'])])
def test_plot(self):
if PLOTLIB_INSTALLED:
content, headers = self.viewer.plot(self.log_files)
self.assertTrue(content is not None)
self.assertEqual(headers, [('content-type', 'image/jpg')])
self.assertRaises(NotFoundException, self.viewer.plot, [])
else:
self.assertRaises(PLOTLIBNotInstalled, self.viewer.plot,
self.log_files)
def test_format_source_code(self):
osfile = os.__file__.rstrip('c')
nfl_os = '%s:%d(%s)' % (osfile, 136, 'makedirs')
self.assertIn('makedirs', self.viewer.format_source_code(nfl_os))
self.assertNotIn('makedirsXYZ', self.viewer.format_source_code(nfl_os))
nfl_illegal = '%sc:136(makedirs)' % osfile
self.assertIn(_('The file type are forbidden to access!'),
self.viewer.format_source_code(nfl_illegal))
nfl_not_exist = '%s.py:136(makedirs)' % osfile
expected_msg = _('Can not access the file %s.py.') % osfile
self.assertIn(expected_msg,
self.viewer.format_source_code(nfl_not_exist))
class TestStats2(unittest.TestCase):
def setUp(self):
if xprofile is None:
raise SkipTest
self.profile_file = tempfile.mktemp('profile', 'unittest')
self.profilers = [xprofile.get_profiler('cProfile'),
xprofile.get_profiler('eventlet.green.profile')]
for p in self.profilers:
p.runctx('import os;os.getcwd();', globals(), locals())
p.dump_stats(self.profile_file)
self.stats2 = Stats2(self.profile_file)
self.selections = [['getcwd'], ['getcwd', -1],
['getcwd', -10], ['getcwd', 0.1]]
def tearDown(self):
os.remove(self.profile_file)
def test_func_to_dict(self):
func = ['profile.py', 100, '__call__']
self.assertEqual({'module': 'profile.py', 'line': 100, 'function':
'__call__'}, self.stats2.func_to_dict(func))
func = ['', 0, '__call__']
self.assertEqual({'module': '', 'line': 0, 'function':
'__call__'}, self.stats2.func_to_dict(func))
def test_to_json(self):
for selection in self.selections:
js = self.stats2.to_json(selection)
self.assertTrue(isinstance(json.loads(js), dict))
self.assertTrue(json.loads(js)['stats'] is not None)
self.assertTrue(json.loads(js)['stats'][0] is not None)
def test_to_ods(self):
if ODFLIB_INSTALLED:
for selection in self.selections:
self.assertTrue(self.stats2.to_ods(selection) is not None)
def test_to_csv(self):
for selection in self.selections:
self.assertTrue(self.stats2.to_csv(selection) is not None)
self.assertTrue('function calls' in self.stats2.to_csv(selection))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ttanner/kryptomime | tests/test_smime.py | 1 | 3239 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# S/MIME unit tests
#
# This file is part of kryptomime, a Python module for email kryptography.
# Copyright © 2013,2014 Thomas Tanner <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For more details see the file COPYING.
from pytest import fixture, mark, raises
from kryptomime import KeyMissingError
from kryptomime.mail import create_mail, protect_mail
from kryptomime.smime import OpenSMIME, Certificate, PrivateKey, MemoryKeyStore, OpenSSL, OpenSSL_CA
import email.mime.text
from conftest import sender, receiver
from test_openssl import x509keys, openssl
passphrase='mysecret'
attachment = email.mime.text.MIMEText('some\nattachment')
msg = create_mail(sender,receiver,'subject','body\nmessage')
msg.epilogue=''
msgatt = create_mail(sender,receiver,'subject','body\nmessage',attach=[attachment])
msgrev = create_mail(receiver,sender,'subject','body\nmessage')
msgself = create_mail(sender,sender,'subject','body\nmessage')
prot = protect_mail(msg,linesep='\r\n')
protatt = protect_mail(msgatt,linesep='\r\n')
def compare_mail(a,b):
if type(a)==str: return a==b
assert a.is_multipart() == b.is_multipart()
#from kryptomime.mail import ProtectedMessage
#assert isinstance(a,ProtectedMessage)==isinstance(b,ProtectedMessage)
# todo headers
if a.is_multipart():
for i in range(len(a.get_payload())):
ap = a.get_payload(i)
bp = b.get_payload(i)
assert ap.as_string() == bp.as_string()
else:
assert a.get_payload() == b.get_payload()
@fixture(scope='module')
def smimesender(x509keys,openssl):
return (OpenSMIME(openssl=openssl,default_key=x509keys[0]),x509keys[0].cacerts)
@fixture(scope='module')
def smimereceiver(x509keys,openssl):
return (OpenSMIME(openssl=openssl,default_key=x509keys[1]),x509keys[0].cacerts)
@mark.parametrize("attach", [False,True])
def test_sign(x509keys, attach, smimesender, smimereceiver):
id1, cacert1 = smimesender
id2, cacert2 = smimereceiver
mail = protatt if attach else prot
sgn = id1.sign(mail)
vfy, signer, valid = id2.verify(sgn,cacerts=cacert1)
assert valid and x509keys[0].cert == signer
compare_mail(mail,vfy)
@mark.parametrize("sign", [False,True])
def test_encrypt(x509keys, sign, smimesender, smimereceiver):
id1, cacert1 = smimesender
id2, cacert2 = smimereceiver
enc = id1.encrypt(protatt,[x509keys[1]],sign=sign, verify=True)
dec = id2.decrypt(enc,verify=sign,cacerts=cacert1)
if sign:
dec, signer, valid = dec
assert valid and x509keys[0].cert == signer
compare_mail(protatt,dec)
| lgpl-3.0 |
Pythonity/icon-font-to-png | icon_font_to_png/icon_font.py | 1 | 6185 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import re
from collections import OrderedDict
import tinycss
from PIL import Image, ImageFont, ImageDraw
from six import unichr
class IconFont(object):
"""Base class that represents web icon font"""
def __init__(self, css_file, ttf_file, keep_prefix=False):
"""
:param css_file: path to icon font CSS file
:param ttf_file: path to icon font TTF file
:param keep_prefix: whether to keep common icon prefix
"""
self.css_file = css_file
self.ttf_file = ttf_file
self.keep_prefix = keep_prefix
self.css_icons, self.common_prefix = self.load_css()
def load_css(self):
"""
        Creates a dict of all icons available in the CSS file and determines
        their common prefix.

        :returns: sorted icons dict, common icon prefix
"""
icons = dict()
common_prefix = None
parser = tinycss.make_parser('page3')
stylesheet = parser.parse_stylesheet_file(self.css_file)
        is_icon = re.compile(r"\.(.*):before,?")
for rule in stylesheet.rules:
selector = rule.selector.as_css()
# Skip CSS classes that are not icons
if not is_icon.match(selector):
continue
# Find out what the common prefix is
if common_prefix is None:
common_prefix = selector[1:]
else:
common_prefix = os.path.commonprefix((common_prefix,
selector[1:]))
for match in is_icon.finditer(selector):
name = match.groups()[0]
for declaration in rule.declarations:
if declaration.name == "content":
val = declaration.value.as_css()
# Strip quotation marks
if re.match("^['\"].*['\"]$", val):
val = val[1:-1]
icons[name] = unichr(int(val[1:], 16))
common_prefix = common_prefix or ''
# Remove common prefix
if not self.keep_prefix and len(common_prefix) > 0:
non_prefixed_icons = {}
for name in icons.keys():
non_prefixed_icons[name[len(common_prefix):]] = icons[name]
icons = non_prefixed_icons
sorted_icons = OrderedDict(sorted(icons.items(), key=lambda t: t[0]))
return sorted_icons, common_prefix
def export_icon(self, icon, size, color='black', scale='auto',
filename=None, export_dir='exported'):
"""
Exports given icon with provided parameters.
If the desired icon size is less than 150x150 pixels, we will first
create a 150x150 pixels image and then scale it down, so that
it's much less likely that the edges of the icon end up cropped.
:param icon: valid icon name
:param filename: name of the output file
:param size: icon size in pixels
:param color: color name or hex value
:param scale: scaling factor between 0 and 1,
or 'auto' for automatic scaling
:param export_dir: path to export directory
"""
org_size = size
size = max(150, size)
image = Image.new("RGBA", (size, size), color=(0, 0, 0, 0))
draw = ImageDraw.Draw(image)
if scale == 'auto':
scale_factor = 1
else:
scale_factor = float(scale)
font = ImageFont.truetype(self.ttf_file, int(size * scale_factor))
width, height = draw.textsize(self.css_icons[icon], font=font)
# If auto-scaling is enabled, we need to make sure the resulting
# graphic fits inside the boundary. The values are rounded and may be
# off by a pixel or two, so we may need to do a few iterations.
# The use of a decrementing multiplication factor protects us from
# getting into an infinite loop.
if scale == 'auto':
iteration = 0
factor = 1
while True:
width, height = draw.textsize(self.css_icons[icon], font=font)
# Check if the image fits
dim = max(width, height)
if dim > size:
font = ImageFont.truetype(self.ttf_file,
int(size * size/dim * factor))
else:
break
# Adjust the factor every two iterations
iteration += 1
if iteration % 2 == 0:
factor *= 0.99
draw.text((float(size - width) / 2, float(size - height) / 2),
self.css_icons[icon], font=font, fill=color)
# Get bounding box
bbox = image.getbbox()
# Create an alpha mask
image_mask = Image.new("L", (size, size), 0)
draw_mask = ImageDraw.Draw(image_mask)
# Draw the icon on the mask
draw_mask.text((float(size - width) / 2, float(size - height) / 2),
self.css_icons[icon], font=font, fill=255)
# Create a solid color image and apply the mask
icon_image = Image.new("RGBA", (size, size), color)
icon_image.putalpha(image_mask)
if bbox:
icon_image = icon_image.crop(bbox)
border_w = int((size - (bbox[2] - bbox[0])) / 2)
border_h = int((size - (bbox[3] - bbox[1])) / 2)
# Create output image
out_image = Image.new("RGBA", (size, size), (0, 0, 0, 0))
out_image.paste(icon_image, (border_w, border_h))
# If necessary, scale the image to the target size
if org_size != size:
out_image = out_image.resize((org_size, org_size), Image.ANTIALIAS)
# Make sure export directory exists
if not os.path.exists(export_dir):
os.makedirs(export_dir)
# Default filename
if not filename:
filename = icon + '.png'
# Save file
out_image.save(os.path.join(export_dir, filename))
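# Minimal usage sketch (file paths and icon name below are assumptions, not files
# shipped with this module):
#   font = IconFont(css_file='font-awesome.css', ttf_file='fontawesome-webfont.ttf')
#   font.export_icon('camera', size=64, color='#336699', export_dir='exported')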
| mit |
h3biomed/ansible | lib/ansible/modules/storage/netapp/na_ontap_lun_copy.py | 28 | 5680 | #!/usr/bin/python
# (c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_lun_copy
short_description: NetApp ONTAP copy LUNs
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.8'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Copy LUNs on NetApp ONTAP.
options:
state:
description:
- Whether the specified LUN should exist or not.
choices: ['present']
default: present
destination_vserver:
description:
- the name of the Vserver that will host the new LUN.
required: true
destination_path:
description:
- Specifies the full path to the new LUN.
required: true
source_path:
description:
- Specifies the full path to the source LUN.
required: true
source_vserver:
description:
- Specifies the name of the vserver hosting the LUN to be copied.
'''
EXAMPLES = """
- name: Copy LUN
na_ontap_lun_copy:
destination_vserver: ansible
destination_path: /vol/test/test_copy_dest_dest_new
source_path: /vol/test/test_copy_1
source_vserver: ansible
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.netapp_module import NetAppModule
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapLUNCopy(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present'], default='present'),
destination_vserver=dict(required=True, type='str'),
destination_path=dict(required=True, type='str'),
source_path=dict(required=True, type='str'),
source_vserver=dict(required=False, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['destination_vserver'])
def get_lun(self):
"""
Check if the LUN exists
        :return: True if it exists, False otherwise
:rtype: bool
"""
return_value = False
lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
query_details = netapp_utils.zapi.NaElement('lun-info')
query_details.add_new_child('path', self.parameters['destination_path'])
query_details.add_new_child('vserver', self.parameters['destination_vserver'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
lun_info.add_child_elem(query)
try:
result = self.server.invoke_successfully(lun_info, True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error getting lun info %s for verver %s: %s" %
(self.parameters['destination_path'], self.parameters['destination_vserver'], to_native(e)),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
return_value = True
return return_value
def copy_lun(self):
"""
Copy LUN with requested path and vserver
"""
lun_copy = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-copy-start', **{'source-vserver': self.parameters['source_vserver']})
path_obj = netapp_utils.zapi.NaElement('paths')
pair = netapp_utils.zapi.NaElement('lun-path-pair')
pair.add_new_child('destination-path', self.parameters['destination_path'])
pair.add_new_child('source-path', self.parameters['source_path'])
path_obj.add_child_elem(pair)
lun_copy.add_child_elem(path_obj)
try:
self.server.invoke_successfully(lun_copy, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error copying lun from %s to vserver %s: %s" %
(self.parameters['source_vserver'], self.parameters['destination_vserver'], to_native(e)),
exception=traceback.format_exc())
def apply(self):
netapp_utils.ems_log_event("na_ontap_lun_copy", self.server)
if self.get_lun(): # lun already exists at destination
changed = False
else:
changed = True
if self.module.check_mode:
pass
else:
# need to copy lun
if self.parameters['state'] == 'present':
self.copy_lun()
self.module.exit_json(changed=changed)
def main():
v = NetAppOntapLUNCopy()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
aabbox/kbengine | kbe/res/scripts/common/Lib/test/test__osx_support.py | 72 | 11776 | """
Test suite for _osx_support: shared OS X support functions.
"""
import os
import platform
import shutil
import stat
import sys
import unittest
import test.support
import _osx_support
@unittest.skipUnless(sys.platform.startswith("darwin"), "requires OS X")
class Test_OSXSupport(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.prog_name = 'bogus_program_xxxx'
self.temp_path_dir = os.path.abspath(os.getcwd())
self.env = test.support.EnvironmentVarGuard()
self.addCleanup(self.env.__exit__)
for cv in ('CFLAGS', 'LDFLAGS', 'CPPFLAGS',
'BASECFLAGS', 'BLDSHARED', 'LDSHARED', 'CC',
'CXX', 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
'PY_CORE_CFLAGS'):
if cv in self.env:
self.env.unset(cv)
def add_expected_saved_initial_values(self, config_vars, expected_vars):
# Ensure that the initial values for all modified config vars
# are also saved with modified keys.
expected_vars.update(('_OSX_SUPPORT_INITIAL_'+ k,
config_vars[k]) for k in config_vars
if config_vars[k] != expected_vars[k])
def test__find_executable(self):
if self.env['PATH']:
self.env['PATH'] = self.env['PATH'] + ':'
self.env['PATH'] = self.env['PATH'] + os.path.abspath(self.temp_path_dir)
test.support.unlink(self.prog_name)
self.assertIsNone(_osx_support._find_executable(self.prog_name))
self.addCleanup(test.support.unlink, self.prog_name)
with open(self.prog_name, 'w') as f:
f.write("#!/bin/sh\n/bin/echo OK\n")
os.chmod(self.prog_name, stat.S_IRWXU)
self.assertEqual(self.prog_name,
_osx_support._find_executable(self.prog_name))
def test__read_output(self):
if self.env['PATH']:
self.env['PATH'] = self.env['PATH'] + ':'
self.env['PATH'] = self.env['PATH'] + os.path.abspath(self.temp_path_dir)
test.support.unlink(self.prog_name)
self.addCleanup(test.support.unlink, self.prog_name)
with open(self.prog_name, 'w') as f:
f.write("#!/bin/sh\n/bin/echo ExpectedOutput\n")
os.chmod(self.prog_name, stat.S_IRWXU)
self.assertEqual('ExpectedOutput',
_osx_support._read_output(self.prog_name))
def test__find_build_tool(self):
out = _osx_support._find_build_tool('cc')
self.assertTrue(os.path.isfile(out),
'cc not found - check xcode-select')
def test__get_system_version(self):
self.assertTrue(platform.mac_ver()[0].startswith(
_osx_support._get_system_version()))
def test__remove_original_values(self):
config_vars = {
'CC': 'gcc-test -pthreads',
}
expected_vars = {
'CC': 'clang -pthreads',
}
cv = 'CC'
newvalue = 'clang -pthreads'
_osx_support._save_modified_value(config_vars, cv, newvalue)
self.assertNotEqual(expected_vars, config_vars)
_osx_support._remove_original_values(config_vars)
self.assertEqual(expected_vars, config_vars)
def test__save_modified_value(self):
config_vars = {
'CC': 'gcc-test -pthreads',
}
expected_vars = {
'CC': 'clang -pthreads',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
cv = 'CC'
newvalue = 'clang -pthreads'
_osx_support._save_modified_value(config_vars, cv, newvalue)
self.assertEqual(expected_vars, config_vars)
def test__save_modified_value_unchanged(self):
config_vars = {
'CC': 'gcc-test -pthreads',
}
expected_vars = config_vars.copy()
cv = 'CC'
newvalue = 'gcc-test -pthreads'
_osx_support._save_modified_value(config_vars, cv, newvalue)
self.assertEqual(expected_vars, config_vars)
def test__supports_universal_builds(self):
import platform
mac_ver_tuple = tuple(int(i) for i in
platform.mac_ver()[0].split('.')[0:2])
self.assertEqual(mac_ver_tuple >= (10, 4),
_osx_support._supports_universal_builds())
def test__find_appropriate_compiler(self):
compilers = (
('gcc-test', 'i686-apple-darwin11-llvm-gcc-4.2'),
('clang', 'clang version 3.1'),
)
config_vars = {
'CC': 'gcc-test -pthreads',
'CXX': 'cc++-test',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-test -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-test -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CC': 'clang -pthreads',
'CXX': 'clang++',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'clang -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'clang -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
suffix = (':' + self.env['PATH']) if self.env['PATH'] else ''
self.env['PATH'] = os.path.abspath(self.temp_path_dir) + suffix
for c_name, c_output in compilers:
test.support.unlink(c_name)
self.addCleanup(test.support.unlink, c_name)
with open(c_name, 'w') as f:
f.write("#!/bin/sh\n/bin/echo " + c_output)
os.chmod(c_name, stat.S_IRWXU)
self.assertEqual(expected_vars,
_osx_support._find_appropriate_compiler(
config_vars))
def test__remove_universal_flags(self):
config_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 ',
'LDFLAGS': ' -g',
'CPPFLAGS': '-I. ',
'BLDSHARED': 'gcc-4.0 -bundle -g',
'LDSHARED': 'gcc-4.0 -bundle -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._remove_universal_flags(
config_vars))
def test__remove_unsupported_archs(self):
config_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch i386 ',
'LDFLAGS': ' -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
suffix = (':' + self.env['PATH']) if self.env['PATH'] else ''
self.env['PATH'] = os.path.abspath(self.temp_path_dir) + suffix
c_name = 'clang'
test.support.unlink(c_name)
self.addCleanup(test.support.unlink, c_name)
# exit status 255 means no PPC support in this compiler chain
with open(c_name, 'w') as f:
f.write("#!/bin/sh\nexit 255")
os.chmod(c_name, stat.S_IRWXU)
self.assertEqual(expected_vars,
_osx_support._remove_unsupported_archs(
config_vars))
def test__override_all_archs(self):
self.env['ARCHFLAGS'] = '-arch x86_64'
config_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch x86_64',
'LDFLAGS': ' -g -arch x86_64',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -g -arch x86_64',
'LDSHARED': 'gcc-4.0 -bundle -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk -g -arch x86_64',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._override_all_archs(
config_vars))
def test__check_for_unavailable_sdk(self):
config_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.1.sdk',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.1.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.1.sdk -g',
}
expected_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
' ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. ',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
' -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._check_for_unavailable_sdk(
config_vars))
def test_get_platform_osx(self):
# Note, get_platform_osx is currently tested more extensively
# indirectly by test_sysconfig and test_distutils
config_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.1.sdk',
'MACOSX_DEPLOYMENT_TARGET': '10.6',
}
result = _osx_support.get_platform_osx(config_vars, ' ', ' ', ' ')
self.assertEqual(('macosx', '10.6', 'fat'), result)
def test_main():
if sys.platform == 'darwin':
test.support.run_unittest(Test_OSXSupport)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
tokenly/counterparty-lib | counterpartylib/lib/messages/execute.py | 3 | 7034 | #! /usr/bin/python3
"""Execute arbitrary data as a smart contract."""
import struct
import binascii
import logging
logger = logging.getLogger(__name__)
from counterpartylib.lib import (util, config, exceptions)
from .scriptlib import (utils, blocks, processblock)
FORMAT = '>20sQQQ'
LENGTH = 44
ID = 101
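# Wire layout implied by FORMAT '>20sQQQ' (big-endian): a 20-byte contract id
# followed by three unsigned 64-bit integers (gasprice, startgas, value), i.e.
# 20 + 3 * 8 = 44 bytes, matching LENGTH; the variable-length payload is
# appended after these fixed fields (see compose/parse below).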
def initialise (db):
cursor = db.cursor()
# Executions
cursor.execute('''CREATE TABLE IF NOT EXISTS executions(
tx_index INTEGER UNIQUE,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
contract_id TEXT,
gas_price INTEGER,
gas_start INTEGER,
gas_cost INTEGER,
gas_remained INTEGER,
value INTEGER,
data BLOB,
output BLOB,
status TEXT,
FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
source_idx ON executions(source)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
tx_hash_idx ON executions(tx_hash)
''')
# Contract Storage
cursor.execute('''CREATE TABLE IF NOT EXISTS storage(
contract_id TEXT,
key BLOB,
value BLOB,
FOREIGN KEY (contract_id) REFERENCES contracts(contract_id))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
contract_id_idx ON contracts(contract_id)
''')
# Suicides
cursor.execute('''CREATE TABLE IF NOT EXISTS suicides(
contract_id TEXT PRIMARY KEY,
FOREIGN KEY (contract_id) REFERENCES contracts(contract_id))
''')
# Nonces
cursor.execute('''CREATE TABLE IF NOT EXISTS nonces(
address TEXT PRIMARY KEY,
nonce INTEGER)
''')
# Postqueue
cursor.execute('''CREATE TABLE IF NOT EXISTS postqueue(
message BLOB)
''')
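    # Rough summary of the tables created above: `executions` stores one row per
    # contract call, `storage` holds per-contract key/value state, `suicides`
    # marks contracts scheduled for deletion, `nonces` tracks per-address
    # transaction counts, and `postqueue` buffers queued messages.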
def compose (db, source, contract_id, gasprice, startgas, value, payload_hex):
if not config.TESTNET: # TODO
return
payload = binascii.unhexlify(payload_hex)
if startgas < 0:
raise processblock.ContractError('negative startgas')
if gasprice < 0:
raise processblock.ContractError('negative gasprice')
# Pack.
data = struct.pack(config.TXTYPE_FORMAT, ID)
curr_format = FORMAT + '{}s'.format(len(payload))
data += struct.pack(curr_format, binascii.unhexlify(contract_id), gasprice, startgas, value, payload)
return (source, [], data)
class Transaction(object):
def __init__(self, tx, to, gasprice, startgas, value, data):
assert type(data) == bytes
self.block_index = tx['block_index']
self.tx_hash = tx['tx_hash']
self.tx_index = tx['tx_index']
self.sender = tx['source']
self.data = data
self.to = to
self.gasprice = gasprice
self.startgas = startgas
self.value = value
self.timestamp = tx['block_time']
def hex_hash(self):
return '<None>'
def to_dict(self):
dict_ = {
'sender': self.sender,
'data': utils.hexprint(self.data),
'to': self.to,
'gasprice': self.gasprice,
'startgas': self.startgas,
'value': self.value
}
return dict_
def parse (db, tx, message):
if not config.TESTNET: # TODO
return
status = 'valid'
output, gas_cost, gas_remained = None, None, None
try:
# TODO: Use unpack function.
# Unpack message.
curr_format = FORMAT + '{}s'.format(len(message) - LENGTH)
try:
contract_id, gasprice, startgas, value, payload = struct.unpack(curr_format, message)
if gasprice > config.MAX_INT or startgas > config.MAX_INT: # TODO: define max for gasprice and startgas
raise exceptions.UnpackError()
except (struct.error) as e:
raise exceptions.UnpackError()
gas_remained = startgas
contract_id = util.hexlify(contract_id)
if contract_id == '0000000000000000000000000000000000000000':
contract_id = ''
# ‘Apply transaction’!
tx_obj = Transaction(tx, contract_id, gasprice, startgas, value, payload)
block_obj = blocks.Block(db, tx['block_hash'])
success, output, gas_remained = processblock.apply_transaction(db, tx_obj, block_obj)
if not success and output == '':
status = 'out of gas'
gas_cost = gasprice * (startgas - gas_remained) # different definition from pyethereum’s
except exceptions.UnpackError as e:
contract_id, gasprice, startgas, value, payload = None, None, None, None, None
status = 'invalid: could not unpack'
output = None
except processblock.ContractError as e:
status = 'invalid: no such contract'
contract_id = None
output = None
except processblock.InsufficientStartGas as e:
have, need = e.args
logger.debug('Insufficient start gas: have {} and need {}'.format(have, need))
status = 'invalid: insufficient start gas'
output = None
except processblock.InsufficientBalance as e:
have, need = e.args
logger.debug('Insufficient balance: have {} and need {}'.format(have, need))
status = 'invalid: insufficient balance'
output = None
except processblock.OutOfGas as e:
logger.debug('TX OUT_OF_GAS (startgas: {}, gas_remained: {})'.format(startgas, gas_remained))
status = 'out of gas'
output = None
finally:
if status == 'valid':
logger.debug('TX FINISHED (gas_remained: {})'.format(gas_remained))
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'contract_id': contract_id,
'gasprice': gasprice,
'startgas': startgas,
'gas_cost': gas_cost,
'gas_remained': gas_remained,
'value': value,
'payload': payload,
'output': output,
'status': status
}
sql='insert into executions values(:tx_index, :tx_hash, :block_index, :source, :contract_id, :gasprice, :startgas, :gas_cost, :gas_remained, :value, :data, :output, :status)'
cursor = db.cursor()
cursor.execute(sql, bindings)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit |
whereismyjetpack/ansible | lib/ansible/modules/cloud/ovirt/ovirt_snapshots_facts.py | 13 | 4318 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_snapshots_facts
short_description: Retrieve facts about one or more oVirt virtual machine snapshots
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt virtual machine snapshots."
notes:
- "This module creates a new top-level C(ovirt_snapshots) fact, which
contains a list of snapshots."
options:
vm:
description:
- "Name of the VM with snapshot."
required: true
description:
description:
- "Description of the snapshot, can be used as glob expression."
snapshot_id:
description:
- "Id of the snaphost we want to retrieve facts about."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all snapshots which description start with C(update) for VM named C(centos7):
- ovirt_snapshots_facts:
vm: centos7
description: update*
- debug:
var: ovirt_snapshots
'''
RETURN = '''
ovirt_snapshots:
description: "List of dictionaries describing the snapshot. Snapshot attribtues are mapped to dictionary keys,
all snapshot attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/snapshot."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
search_by_name,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
vm=dict(required=True),
description=dict(default=None),
snapshot_id=dict(default=None),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
connection = create_connection(module.params.pop('auth'))
vms_service = connection.system_service().vms_service()
vm_name = module.params['vm']
vm = search_by_name(vms_service, vm_name)
if vm is None:
raise Exception("VM '%s' was not found." % vm_name)
snapshots_service = vms_service.service(vm.id).snapshots_service()
if module.params['description']:
snapshots = [
e for e in snapshots_service.list()
if fnmatch.fnmatch(e.description, module.params['description'])
]
elif module.params['snapshot_id']:
snapshots = [
snapshots_service.snapshot_service(module.params['snapshot_id']).get()
]
else:
snapshots = snapshots_service.list()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_snapshots=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in snapshots
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
JPFrancoia/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # this many iterations are needed for the components to converge
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
mlyundin/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
scith/htpc-manager_ynh | sources/libs/sqlobject/declarative.py | 10 | 7263 | """
Declarative objects.
Declarative objects have a simple protocol: you can use classes in
lieu of instances and they are equivalent, and any keyword arguments
you give to the constructor will override those instance variables.
(So if a class is received, we'll simply instantiate an instance with
no arguments).
You can provide a variable __unpackargs__ (a list of strings), and if
the constructor is called with non-keyword arguments they will be
interpreted as the given keyword arguments.
If __unpackargs__ is ('*', name), then all the arguments will be put
in a variable by that name.
You can define a __classinit__(cls, new_attrs) method, which will be
called when the class is created (including subclasses). Note: you
can't use super() in __classinit__ because the class isn't bound to a
name. As an analog to __classinit__, Declarative adds
__instanceinit__ which is called with the same argument (new_attrs).
This is like __init__, but after __unpackargs__ and other factors have
been taken into account.
If __mutableattributes__ is defined as a sequence of strings, these
attributes will not be shared between superclasses and their
subclasses. E.g., if you have a class variable that contains a list
and you append to that list, changes to subclasses will affect
superclasses unless you add the attribute here.
Also defines classinstancemethod, which acts as either a class method
or an instance method depending on where it is called.
"""
import copy
import events
import itertools
counter = itertools.count()
__all__ = ('classinstancemethod', 'DeclarativeMeta', 'Declarative')
class classinstancemethod(object):
"""
Acts like a class method when called from a class, like an
instance method when called by an instance. The method should
take two arguments, 'self' and 'cls'; one of these will be None
depending on how the method was called.
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, type=None):
return _methodwrapper(self.func, obj=obj, type=type)
class _methodwrapper(object):
def __init__(self, func, obj, type):
self.func = func
self.obj = obj
self.type = type
def __call__(self, *args, **kw):
assert not 'self' in kw and not 'cls' in kw, (
"You cannot use 'self' or 'cls' arguments to a "
"classinstancemethod")
return self.func(*((self.obj, self.type) + args), **kw)
def __repr__(self):
if self.obj is None:
return ('<bound class method %s.%s>'
% (self.type.__name__, self.func.func_name))
else:
return ('<bound method %s.%s of %r>'
% (self.type.__name__, self.func.func_name, self.obj))
class DeclarativeMeta(type):
def __new__(meta, class_name, bases, new_attrs):
post_funcs = []
early_funcs = []
events.send(events.ClassCreateSignal,
bases[0], class_name, bases, new_attrs,
post_funcs, early_funcs)
cls = type.__new__(meta, class_name, bases, new_attrs)
for func in early_funcs:
func(cls)
if '__classinit__' in new_attrs:
cls.__classinit__ = staticmethod(cls.__classinit__.im_func)
cls.__classinit__(cls, new_attrs)
for func in post_funcs:
func(cls)
return cls
class Declarative(object):
__unpackargs__ = ()
__mutableattributes__ = ()
__metaclass__ = DeclarativeMeta
__restrict_attributes__ = None
def __classinit__(cls, new_attrs):
cls.declarative_count = counter.next()
for name in cls.__mutableattributes__:
if name not in new_attrs:
                setattr(cls, name, copy.copy(getattr(cls, name)))
def __instanceinit__(self, new_attrs):
if self.__restrict_attributes__ is not None:
for name in new_attrs:
if name not in self.__restrict_attributes__:
raise TypeError(
'%s() got an unexpected keyword argument %r'
% (self.__class__.__name__, name))
for name, value in new_attrs.items():
setattr(self, name, value)
if 'declarative_count' not in new_attrs:
self.declarative_count = counter.next()
def __init__(self, *args, **kw):
if self.__unpackargs__ and self.__unpackargs__[0] == '*':
assert len(self.__unpackargs__) == 2, \
"When using __unpackargs__ = ('*', varname), you must only provide a single variable name (you gave %r)" % self.__unpackargs__
name = self.__unpackargs__[1]
if name in kw:
raise TypeError(
"keyword parameter '%s' was given by position and name"
% name)
kw[name] = args
else:
if len(args) > len(self.__unpackargs__):
raise TypeError(
'%s() takes at most %i arguments (%i given)'
% (self.__class__.__name__,
len(self.__unpackargs__),
len(args)))
for name, arg in zip(self.__unpackargs__, args):
if name in kw:
raise TypeError(
"keyword parameter '%s' was given by position and name"
% name)
kw[name] = arg
if '__alsocopy' in kw:
for name, value in kw['__alsocopy'].items():
if name not in kw:
if name in self.__mutableattributes__:
value = copy.copy(value)
kw[name] = value
del kw['__alsocopy']
self.__instanceinit__(kw)
def __call__(self, *args, **kw):
kw['__alsocopy'] = self.__dict__
return self.__class__(*args, **kw)
@classinstancemethod
def singleton(self, cls):
if self:
return self
name = '_%s__singleton' % cls.__name__
if not hasattr(cls, name):
setattr(cls, name, cls(declarative_count=cls.declarative_count))
return getattr(cls, name)
@classinstancemethod
def __repr__(self, cls):
if self:
name = '%s object' % self.__class__.__name__
v = self.__dict__.copy()
else:
name = '%s class' % cls.__name__
v = cls.__dict__.copy()
if 'declarative_count' in v:
name = '%s %i' % (name, v['declarative_count'])
del v['declarative_count']
# @@: simplifying repr:
#v = {}
names = v.keys()
args = []
for n in self._repr_vars(names):
args.append('%s=%r' % (n, v[n]))
if not args:
return '<%s>' % name
else:
return '<%s %s>' % (name, ' '.join(args))
@staticmethod
def _repr_vars(dictNames):
names = [n for n in dictNames
if not n.startswith('_')
and n != 'declarative_count']
names.sort()
return names
def setup_attributes(cls, new_attrs):
for name, value in new_attrs.items():
if hasattr(value, '__addtoclass__'):
value.__addtoclass__(cls, name)
| gpl-3.0 |
fredericlepied/ansible | lib/ansible/modules/network/vyos/vyos_user.py | 11 | 10403 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: vyos_user
version_added: "2.4"
author: "Trishna Guha (@trishnag)"
short_description: Manage the collection of local users on VyOS device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the collection of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
options:
users:
description:
- The set of username objects to be configured on the remote
VyOS device. The list entries can either be the username or
a hash of username and properties. This argument is mutually
        exclusive with the C(name) argument; C(aggregate) is an alias.
name:
description:
- The username to be configured on the VyOS device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
        Please note that this option is not the same as C(provider username).
full_name:
description:
- The C(full_name) argument provides the full name of the user
account to be created on the remote device. This argument accepts
any text string value.
password:
description:
- The password to be configured on the VyOS device. The
        password needs to be provided in clear text and it will be encrypted
        on the device.
        Please note that this option is not the same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
level:
description:
- The C(level) argument configures the level of the user when logged
into the system. This argument accepts string values admin or operator.
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
        `admin` user (the currently defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: create a new user
vyos_user:
name: ansible
password: password
state: present
- name: remove all users except admin
vyos_user:
purge: yes
- name: set multiple users to level operator
vyos_user:
users:
- name: netop
- name: netend
level: operator
state: present
- name: Change Password for User netop
vyos_user:
name: netop
password: "{{ new_password }}"
update_password: always
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set system login user test level operator
- set system login user authentication plaintext-password password
"""
import re
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vyos import get_config, load_config
from ansible.module_utils.six import iteritems
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def validate_level(value, module):
if value not in ('admin', 'operator'):
module.fail_json(msg='level must be either admin or operator, got %s' % value)
def spec_to_commands(updates, module):
commands = list()
state = module.params['state']
update_password = module.params['update_password']
def needs_update(want, have, x):
return want.get(x) and (want.get(x) != have.get(x))
def add(command, want, x):
command.append('set system login user %s %s' % (want['name'], x))
for update in updates:
want, have = update
if want['state'] == 'absent':
commands.append('delete system login user %s' % want['name'])
continue
if needs_update(want, have, 'level'):
add(commands, want, "level %s" % want['level'])
if needs_update(want, have, 'full_name'):
add(commands, want, "full-name %s" % want['full_name'])
if needs_update(want, have, 'password'):
if update_password == 'always' or not have:
add(commands, want, 'authentication plaintext-password %s' % want['password'])
return commands
def parse_level(data):
match = re.search(r'level (\S+)', data, re.M)
if match:
level = match.group(1)[1:-1]
return level
def parse_full_name(data):
match = re.search(r'full-name (\S+)', data, re.M)
if match:
full_name = match.group(1)[1:-1]
return full_name
def config_to_dict(module):
data = get_config(module)
match = re.findall(r'^set system login user (\S+)', data, re.M)
if not match:
return list()
instances = list()
for user in set(match):
regex = r' %s .+$' % user
cfg = re.findall(regex, data, re.M)
cfg = '\n'.join(cfg)
obj = {
'name': user,
'state': 'present',
'password': None,
'level': parse_level(cfg),
'full_name': parse_full_name(cfg)
}
instances.append(obj)
return instances
def get_param_value(key, item, module):
# if key doesn't exist in the item, get it from module.params
if not item.get(key):
value = module.params[key]
# if key does exist, do a type check on it to validate it
else:
value_type = module.argument_spec[key].get('type', 'str')
type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(item[key])
value = item[key]
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if all((value, validator)):
validator(value, module)
return value
def map_params_to_obj(module):
users = module.params['users']
if not users:
if not module.params['name'] and module.params['purge']:
return list()
elif not module.params['name']:
module.fail_json(msg='username is required')
else:
aggregate = [{'name': module.params['name']}]
else:
aggregate = list()
for item in users:
if not isinstance(item, dict):
aggregate.append({'name': item})
elif 'name' not in item:
module.fail_json(msg='name is required')
else:
aggregate.append(item)
objects = list()
for item in aggregate:
get_value = partial(get_param_value, item=item, module=module)
item['password'] = get_value('password')
item['full_name'] = get_value('full_name')
item['level'] = get_value('level')
item['state'] = get_value('state')
objects.append(item)
return objects
def update_objects(want, have):
updates = list()
for entry in want:
item = next((i for i in have if i['name'] == entry['name']), None)
if item is None:
updates.append((entry, {}))
elif item:
for key, value in iteritems(entry):
if value and value != item[key]:
updates.append((entry, item))
return updates
def main():
""" main entry point for module execution
"""
argument_spec = dict(
users=dict(type='list', aliases=['aggregate']),
name=dict(),
full_name=dict(),
level=dict(aliases=['role']),
password=dict(no_log=True),
update_password=dict(default='always', choices=['on_create', 'always']),
purge=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(vyos_argument_spec)
mutually_exclusive = [('name', 'users')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = config_to_dict(module)
commands = spec_to_commands(update_objects(want, have), module)
if module.params['purge']:
want_users = [x['name'] for x in want]
have_users = [x['name'] for x in have]
for item in set(have_users).difference(want_users):
commands.append('delete system login user %s' % item)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jillesme/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/examples/add_person.py | 432 | 1656 | #! /usr/bin/python
# See README.txt for information and build instructions.
import addressbook_pb2
import sys
# This function fills in a Person message based on user input.
def PromptForAddress(person):
person.id = int(raw_input("Enter person ID number: "))
person.name = raw_input("Enter name: ")
email = raw_input("Enter email address (blank for none): ")
if email != "":
person.email = email
while True:
number = raw_input("Enter a phone number (or leave blank to finish): ")
if number == "":
break
phone_number = person.phone.add()
phone_number.number = number
type = raw_input("Is this a mobile, home, or work phone? ")
if type == "mobile":
phone_number.type = addressbook_pb2.Person.MOBILE
elif type == "home":
phone_number.type = addressbook_pb2.Person.HOME
elif type == "work":
phone_number.type = addressbook_pb2.Person.WORK
else:
print "Unknown phone type; leaving as default value."
# Main procedure: Reads the entire address book from a file,
# adds one person based on user input, then writes it back out to the same
# file.
if len(sys.argv) != 2:
print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
sys.exit(-1)
address_book = addressbook_pb2.AddressBook()
# Read the existing address book.
try:
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()
except IOError:
print sys.argv[1] + ": File not found. Creating a new file."
# Add an address.
PromptForAddress(address_book.person.add())
# Write the new address book back to disk.
f = open(sys.argv[1], "wb")
f.write(address_book.SerializeToString())
f.close()
| bsd-3-clause |
datascopeanalytics/traces | tests/test_docs.py | 1 | 1056 | import traces
from datetime import datetime
def test_quickstart():
time_series = traces.TimeSeries()
time_series[datetime(2042, 2, 1, 6, 0, 0)] = 0 # 6:00:00am
time_series[datetime(2042, 2, 1, 7, 45, 56)] = 1 # 7:45:56am
time_series[datetime(2042, 2, 1, 8, 51, 42)] = 0 # 8:51:42am
time_series[datetime(2042, 2, 1, 12, 3, 56)] = 1 # 12:03:56am
time_series[datetime(2042, 2, 1, 12, 7, 13)] = 0 # 12:07:13am
assert time_series[datetime(2042, 2, 1, 11, 0, 0)] == 0
distribution = time_series.distribution(
start=datetime(2042, 2, 1, 6, 0, 0), # 6:00am
end=datetime(2042, 2, 1, 13, 0, 0) # 1:00pm
)
assert distribution[1] == 0.16440476190476191
def test_reference():
cart = traces.TimeSeries()
cart[1.2] = {'broccoli'}
cart[1.7] = {'broccoli', 'apple'}
cart[2.2] = {'apple'}
cart[3.5] = {'apple', 'beets'}
assert cart[2] == {'broccoli', 'apple'}
assert cart[-1] is None
cart = traces.TimeSeries(default=set())
assert cart[-1] == set([])
| mit |
xyloeric/pi | piExp/users/views.py | 55 | 1681 | # -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from .forms import UserForm
# Import the customized User model
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
form_class = UserForm
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username" | bsd-3-clause |
guolivar/totus-niwa | service/thirdparty/featureserver/FeatureServer/DataSource/DBM.py | 1 | 6212 | __author__ = "MetaCarta"
__copyright__ = "Copyright (c) 2006-2008 MetaCarta"
__license__ = "Clear BSD"
__version__ = "$Id: DBM.py 444 2008-03-19 01:35:35Z brentp $"
from FeatureServer.DataSource import DataSource
from FeatureServer.DataSource import Lock
from FeatureServer.Service.Action import Action
import anydbm
import UserDict
try:
import cPickle as pickle
except ImportError:
import pickle
class DBM (DataSource):
"""Simple datasource using the anydbm module and pickled datastructures."""
def __init__(self, name, writable = 0, lockfile = None, unique = None, **args):
DataSource.__init__(self, name, **args)
self.db = Recno( args["file"] )
self.append = self.db.append
self.writable = int(writable)
self.unique = unique
if self.writable and lockfile:
self.lock = Lock(lockfile)
else:
self.lock = None
def __iter__ (self):
return self.db.__iter__()
def begin (self):
if self.lock: return self.lock.lock()
def commit (self):
if hasattr(self.db, "sync"): self.db.sync()
if self.lock: self.lock.unlock()
def rollback (self):
if self.lock: self.lock.unlock()
def insert (self, action):
if self.unique:
action.id = self.insertUnique(action.feature)
else:
thunk = self.freeze_feature(action.feature)
action.id = self.append(thunk)
return self.select(action)
def insertUnique(self, feature):
if not feature.properties.has_key(self.unique):
raise Exception("Unique key (%s) missing from feature." % self.unique)
action = Action()
action.attributes[self.unique] = feature.properties[self.unique]
features = self.select(action)
if len(features) > 1:
raise Exception("There are two features with the unique key %s. Something's wrong with that." % feature.properties[self.unique])
thunk = self.freeze_feature(feature)
if len(features) == 0:
return self.append(thunk)
else:
self.db[features[0].id] = thunk
return features[0].id
def update (self, action):
self.db[action.id] = self.freeze_feature(action.feature)
return self.select(action)
def delete (self, action):
feature = action.feature
if action.id:
del self.db[action.id]
elif action.attributes:
for feat in self.select(action):
del self.db[feat.id]
return []
def select (self, action):
def _overlap (a, b):
return a[2] >= b[0] and \
b[2] >= a[0] and \
a[3] >= b[1] and \
b[3] >= a[1]
if action.id is not None:
feature = self.thaw_feature( self.db[action.id] )
feature.id = action.id
return [feature]
else:
result = []
count = action.maxfeatures
counter = 0
for id in self:
if counter < action.startfeature:
counter += 1
continue
thunk = self.db[id]
feature = self.thaw_feature(thunk)
feature.id = id
if action.bbox and not _overlap(action.bbox, feature.bbox):
continue
if action.attributes:
props = feature.properties
skip = False
for key, val in action.attributes.items():
if (key not in props and val is not None) or \
(key in props and str(props[key]) != val):
skip = True
break
if skip: continue
result.append(feature)
if count is not None:
count -= 1
if not count: break
return result
def freeze_feature (self, feature):
feature.bbox = feature.get_bbox()
return pickle.dumps(feature)
def thaw_feature (self, thunk):
return pickle.loads(thunk)
class Recno (object):
"""Class to handle managment of the database file."""
class Iterator (object):
def __init__ (self, recno, idx = 0):
self.recno = recno
self.idx = self.recno.max + 1
self.stopIdx = idx
def __iter__ (self):
return self
def next (self):
while True:
self.idx -= 1
if self.idx == 0 or self.idx == self.stopIdx:
raise StopIteration
if not self.recno.has_key(self.idx):
continue
return self.idx
def __init__(self, file):
self.file = file
self.max = 0
self.data = None
self.open()
def __getitem__ (self, key):
if not self.data:
self.open()
return self.data[str(key)]
def __setitem__ (self, key, val):
if not self.data:
self.open()
self.data[str(key)] = val
if key > self.max: self.max = key
def __delitem__ (self, key):
if not self.data:
self.open()
del self.data[str(key)]
def __len__ (self):
if not self.data:
self.open()
return len(self.data)
def __iter__ (self):
return self.Iterator(self)
def has_key (self, key):
if not self.data:
self.open()
return self.data.has_key(str(key))
def sync (self, reopen=True):
if not self.data:
self.open()
self.data["_"] = str(self.max)
del self.data
self.data = None
if reopen:
self.data = anydbm.open( self.file, "c" )
def __del__ (self):
self.sync(False)
def append (self, val):
self.max += 1
self.__setitem__(self.max, val)
return self.max
def open(self):
self.data = anydbm.open( self.file, "c" )
if self.data.has_key("_"):
self.max = int(self.data["_"])
| gpl-3.0 |
GladeRom/android_external_chromium_org | tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/top_20.py | 34 | 3245 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class Top20Page(page_module.Page):
def __init__(self, url, page_set, name=''):
super(Top20Page, self).__init__(url=url, page_set=page_set, name=name)
self.archive_data_file = '../data/chrome_proxy_top_20.json'
class Top20PageSet(page_set_module.PageSet):
""" Pages hand-picked for Chrome Proxy tests. """
def __init__(self):
super(Top20PageSet, self).__init__(
archive_data_file='../data/chrome_proxy_top_20.json')
# Why: top google property; a google tab is often open
self.AddPage(Top20Page('https://www.google.com/#hl=en&q=barack+obama',
self))
# Why: #3 (Alexa global)
self.AddPage(Top20Page('http://www.youtube.com', self))
# Why: #18 (Alexa global), Picked an interesting post
self.AddPage(Top20Page(
# pylint: disable=C0301
'http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/',
self, 'Wordpress'))
    # Why: top social, Public profile
self.AddPage(Top20Page('http://www.facebook.com/barackobama', self,
'Facebook'))
    # Why: #12 (Alexa global), Public profile
self.AddPage(Top20Page('http://www.linkedin.com/in/linustorvalds', self,
'LinkedIn'))
    # Why: #6 (Alexa) most visited worldwide, Picked an interesting page
self.AddPage(Top20Page('http://en.wikipedia.org/wiki/Wikipedia', self,
'Wikipedia (1 tab)'))
    # Why: #8 (Alexa global), Picked an interesting page
self.AddPage(Top20Page('https://twitter.com/katyperry', self, 'Twitter'))
# Why: #37 (Alexa global)
self.AddPage(Top20Page('http://pinterest.com', self, 'Pinterest'))
# Why: #1 sports
self.AddPage(Top20Page('http://espn.go.com', self, 'ESPN'))
# Why: #1 news worldwide (Alexa global)
self.AddPage(Top20Page('http://news.yahoo.com', self))
# Why: #2 news worldwide
self.AddPage(Top20Page('http://www.cnn.com', self))
    # Why: #7 (Alexa news); #27 total time spent, Picked interesting page
self.AddPage(Top20Page(
'http://www.weather.com/weather/right-now/Mountain+View+CA+94043',
self, 'Weather.com'))
# Why: #1 world commerce website by visits; #3 commerce in the US by time
# spent
self.AddPage(Top20Page('http://www.amazon.com', self))
# Why: #1 commerce website by time spent by users in US
self.AddPage(Top20Page('http://www.ebay.com', self))
# Why: #1 games according to Alexa (with actual games in it)
self.AddPage(Top20Page('http://games.yahoo.com', self))
# Why: #1 Alexa recreation
self.AddPage(Top20Page('http://booking.com', self))
# Why: #1 Alexa reference
self.AddPage(Top20Page('http://answers.yahoo.com', self))
# Why: #1 Alexa sports
self.AddPage(Top20Page('http://sports.yahoo.com/', self))
# Why: top tech blog
self.AddPage(Top20Page('http://techcrunch.com', self))
self.AddPage(Top20Page('http://www.nytimes.com', self))
| bsd-3-clause |
jkandasa/integration_tests | scripts/enable_internal_db.py | 8 | 1146 | #!/usr/bin/env python2
"""SSH in to a running appliance and set up an internal DB.
An optional region can be specified (default 0), and the script
will use the first available unpartitioned disk as the data volume
for postgresql.
Running this script against an already configured appliance is
unsupported; hilarity may ensue.
"""
import argparse
import sys
from cfme.utils.appliance import IPAppliance
def main():
parser = argparse.ArgumentParser(epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('address',
help='hostname or ip address of target appliance')
parser.add_argument('--region', default=0, type=int,
help='region to assign to the new DB')
args = parser.parse_args()
print('Initializing Appliance Internal DB')
ip_a = IPAppliance(args.address)
status, out = ip_a.db.enable_internal(args.region)
if status != 0:
print('Enabling DB failed with error:')
print(out)
sys.exit(1)
else:
print('DB Enabled, evm watchdog should start the UI shortly.')
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 |
ryenus/vbox | src/VBox/HostServices/SharedOpenGL/crserverlib/get_sizes.py | 1 | 17633 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
num_get_values = {
'GL_ACCUM_ALPHA_BITS' : 1,
'GL_ACCUM_BLUE_BITS' : 1,
'GL_ACCUM_CLEAR_VALUE': 4,
'GL_ACCUM_GREEN_BITS' : 1,
'GL_ACCUM_RED_BITS' : 1,
'GL_ALPHA_BIAS' : 1,
'GL_ALPHA_BITS' : 1,
'GL_ALPHA_SCALE' : 1,
'GL_ALPHA_TEST' : 1,
'GL_ALPHA_TEST_FUNC' : 1,
'GL_ALPHA_TEST_REF' : 1,
'GL_ATTRIB_STACK_DEPTH' : 1,
'GL_AUTO_NORMAL' : 1,
'GL_AUX_BUFFERS' : 1,
'GL_BLEND' : 1,
'GL_BLEND_DST' : 1,
'GL_BLEND_SRC' : 1,
'GL_BLUE_BIAS' : 1,
'GL_BLUE_BITS' : 1,
'GL_BLUE_SCALE' : 1,
'GL_CLIENT_ATTRIB_STACK_DEPTH' : 1,
'GL_CLIP_PLANE0' : 1,
'GL_CLIP_PLANE1' : 1,
'GL_CLIP_PLANE2' : 1,
'GL_CLIP_PLANE3' : 1,
'GL_CLIP_PLANE4' : 1,
'GL_CLIP_PLANE5' : 1,
'GL_COLOR_ARRAY' : 1,
'GL_COLOR_ARRAY_SIZE' : 1,
'GL_COLOR_ARRAY_STRIDE' : 1,
'GL_COLOR_ARRAY_TYPE' : 1,
'GL_COLOR_CLEAR_VALUE': 4,
'GL_COLOR_LOGIC_OP' : 1,
'GL_COLOR_MATERIAL' : 1,
'GL_COLOR_MATERIAL_FACE' : 1,
'GL_COLOR_MATERIAL_PARAMETER' : 1,
'GL_COLOR_MATRIX_STACK_DEPTH' : 1,
'GL_COLOR_WRITEMASK': 4,
'GL_CULL_FACE' : 1,
'GL_CULL_FACE_MODE' : 1,
'GL_CURRENT_COLOR': 4,
'GL_CURRENT_INDEX' : 1,
'GL_CURRENT_NORMAL': 3,
'GL_CURRENT_RASTER_COLOR': 4,
'GL_CURRENT_RASTER_DISTANCE' : 1,
'GL_CURRENT_RASTER_INDEX' : 1,
'GL_CURRENT_RASTER_POSITION': 4,
'GL_CURRENT_RASTER_POSITION_VALID' : 1,
'GL_CURRENT_RASTER_TEXTURE_COORDS': 4,
'GL_CURRENT_TEXTURE_COORDS': 4,
'GL_DEPTH_BIAS' : 1,
'GL_DEPTH_BITS' : 1,
'GL_DEPTH_CLEAR_VALUE' : 1,
'GL_DEPTH_FUNC' : 1,
'GL_DEPTH_RANGE': 2,
'GL_DEPTH_SCALE' : 1,
'GL_DEPTH_TEST' : 1,
'GL_DEPTH_WRITEMASK' : 1,
'GL_DITHER' : 1,
'GL_DOUBLEBUFFER' : 1,
'GL_DRAW_BUFFER' : 1,
'GL_EDGE_FLAG' : 1,
'GL_EDGE_FLAG_ARRAY' : 1,
'GL_EDGE_FLAG_ARRAY_STRIDE' : 1,
'GL_FEEDBACK_BUFFER_SIZE' : 1,
'GL_FEEDBACK_BUFFER_TYPE' : 1,
'GL_FOG' : 1,
'GL_FOG_COLOR': 4,
'GL_FOG_DENSITY' : 1,
'GL_FOG_END' : 1,
'GL_FOG_HINT' : 1,
'GL_FOG_INDEX' : 1,
'GL_FOG_MODE' : 1,
'GL_FOG_START' : 1,
'GL_FRONT_FACE' : 1,
'GL_GREEN_BIAS' : 1,
'GL_GREEN_BITS' : 1,
'GL_GREEN_SCALE' : 1,
'GL_INDEX_ARRAY' : 1,
'GL_INDEX_ARRAY_STRIDE' : 1,
'GL_INDEX_ARRAY_TYPE' : 1,
'GL_INDEX_BITS' : 1,
'GL_INDEX_CLEAR_VALUE' : 1,
'GL_INDEX_LOGIC_OP' : 1,
'GL_INDEX_MODE' : 1,
'GL_INDEX_OFFSET' : 1,
'GL_INDEX_SHIFT' : 1,
'GL_INDEX_WRITEMASK' : 1,
'GL_LIGHT0' : 1,
'GL_LIGHT1' : 1,
'GL_LIGHT2' : 1,
'GL_LIGHT3' : 1,
'GL_LIGHT4' : 1,
'GL_LIGHT5' : 1,
'GL_LIGHT6' : 1,
'GL_LIGHT7' : 1,
'GL_LIGHTING' : 1,
'GL_LIGHT_MODEL_AMBIENT': 4,
'GL_LIGHT_MODEL_LOCAL_VIEWER' : 1,
'GL_LIGHT_MODEL_TWO_SIDE' : 1,
'GL_LINE_SMOOTH' : 1,
'GL_LINE_SMOOTH_HINT' : 1,
'GL_LINE_STIPPLE' : 1,
'GL_LINE_STIPPLE_PATTERN' : 1,
'GL_LINE_STIPPLE_REPEAT' : 1,
'GL_LINE_WIDTH' : 1,
'GL_LINE_WIDTH_GRANULARITY' : 1,
'GL_LINE_WIDTH_RANGE': 2,
'GL_LIST_BASE' : 1,
'GL_LIST_INDEX' : 1,
'GL_LIST_MODE' : 1,
'GL_LOGIC_OP_MODE' : 1,
'GL_MAP1_COLOR_4' : 1,
'GL_MAP1_GRID_DOMAIN': 2,
'GL_MAP1_GRID_SEGMENTS' : 1,
'GL_MAP1_INDEX' : 1,
'GL_MAP1_NORMAL' : 1,
'GL_MAP1_TEXTURE_COORD_1' : 1,
'GL_MAP1_TEXTURE_COORD_2' : 1,
'GL_MAP1_TEXTURE_COORD_3' : 1,
'GL_MAP1_TEXTURE_COORD_4' : 1,
'GL_MAP1_VERTEX_3' : 1,
'GL_MAP1_VERTEX_4' : 1,
'GL_MAP2_COLOR_4' : 1,
'GL_MAP2_GRID_DOMAIN': 4,
'GL_MAP2_GRID_SEGMENTS': 2,
'GL_MAP2_INDEX' : 1,
'GL_MAP2_NORMAL' : 1,
'GL_MAP2_TEXTURE_COORD_1' : 1,
'GL_MAP2_TEXTURE_COORD_2' : 1,
'GL_MAP2_TEXTURE_COORD_3' : 1,
'GL_MAP2_TEXTURE_COORD_4' : 1,
'GL_MAP2_VERTEX_3' : 1,
'GL_MAP2_VERTEX_4' : 1,
'GL_MAP_COLOR' : 1,
'GL_MAP_STENCIL' : 1,
'GL_MATRIX_MODE' : 1,
'GL_MAX_CLIENT_ATTRIB_STACK_DEPTH' : 1,
'GL_MAX_ATTRIB_STACK_DEPTH' : 1,
'GL_MAX_CLIP_PLANES' : 1,
'GL_MAX_COLOR_MATRIX_STACK_DEPTH' : 1,
'GL_MAX_EVAL_ORDER' : 1,
'GL_MAX_LIGHTS' : 1,
'GL_MAX_LIST_NESTING' : 1,
'GL_MAX_MODELVIEW_STACK_DEPTH' : 1,
'GL_MAX_NAME_STACK_DEPTH' : 1,
'GL_MAX_PIXEL_MAP_TABLE' : 1,
'GL_MAX_PROJECTION_STACK_DEPTH' : 1,
'GL_MAX_TEXTURE_SIZE' : 1,
'GL_MAX_3D_TEXTURE_SIZE' : 1,
'GL_MAX_TEXTURE_STACK_DEPTH' : 1,
'GL_MAX_VIEWPORT_DIMS': 2,
'GL_MODELVIEW_MATRIX': 16,
'GL_MODELVIEW_STACK_DEPTH' : 1,
'GL_NAME_STACK_DEPTH' : 1,
'GL_NORMAL_ARRAY' : 1,
'GL_NORMAL_ARRAY_STRIDE' : 1,
'GL_NORMAL_ARRAY_TYPE' : 1,
'GL_NORMALIZE' : 1,
'GL_PACK_ALIGNMENT' : 1,
'GL_PACK_LSB_FIRST' : 1,
'GL_PACK_ROW_LENGTH' : 1,
'GL_PACK_SKIP_PIXELS' : 1,
'GL_PACK_SKIP_ROWS' : 1,
'GL_PACK_SWAP_BYTES' : 1,
'GL_PERSPECTIVE_CORRECTION_HINT' : 1,
'GL_PIXEL_MAP_A_TO_A_SIZE' : 1,
'GL_PIXEL_MAP_B_TO_B_SIZE' : 1,
'GL_PIXEL_MAP_G_TO_G_SIZE' : 1,
'GL_PIXEL_MAP_I_TO_A_SIZE' : 1,
'GL_PIXEL_MAP_I_TO_B_SIZE' : 1,
'GL_PIXEL_MAP_I_TO_G_SIZE' : 1,
'GL_PIXEL_MAP_I_TO_I_SIZE' : 1,
'GL_PIXEL_MAP_I_TO_R_SIZE' : 1,
'GL_PIXEL_MAP_R_TO_R_SIZE' : 1,
'GL_PIXEL_MAP_S_TO_S_SIZE' : 1,
'GL_POINT_SIZE' : 1,
'GL_POINT_SIZE_GRANULARITY' : 1,
'GL_POINT_SIZE_RANGE': 2,
'GL_POINT_SMOOTH' : 1,
'GL_POINT_SMOOTH_HINT' : 1,
'GL_POLYGON_MODE': 2,
'GL_POLYGON_OFFSET_FACTOR' : 1,
'GL_POLYGON_OFFSET_UNITS' : 1,
'GL_POLYGON_OFFSET_FILL' : 1,
'GL_POLYGON_OFFSET_LINE' : 1,
'GL_POLYGON_OFFSET_POINT' : 1,
'GL_POLYGON_SMOOTH' : 1,
'GL_POLYGON_SMOOTH_HINT' : 1,
'GL_POLYGON_STIPPLE' : 1,
'GL_PROJECTION_MATRIX': 16,
'GL_PROJECTION_STACK_DEPTH' : 1,
'GL_READ_BUFFER' : 1,
'GL_RED_BIAS' : 1,
'GL_RED_BITS' : 1,
'GL_RED_SCALE' : 1,
'GL_RENDER_MODE' : 1,
'GL_RGBA_MODE' : 1,
'GL_SCISSOR_BOX': 4,
'GL_SCISSOR_TEST' : 1,
'GL_SELECTION_BUFFER_SIZE' : 1,
'GL_SHADE_MODEL' : 1,
'GL_STENCIL_BITS' : 1,
'GL_STENCIL_CLEAR_VALUE' : 1,
'GL_STENCIL_FAIL' : 1,
'GL_STENCIL_FUNC' : 1,
'GL_STENCIL_PASS_DEPTH_FAIL' : 1,
'GL_STENCIL_PASS_DEPTH_PASS' : 1,
'GL_STENCIL_REF' : 1,
'GL_STENCIL_TEST' : 1,
'GL_STENCIL_VALUE_MASK' : 1,
'GL_STENCIL_WRITEMASK' : 1,
'GL_STEREO' : 1,
'GL_SUBPIXEL_BITS' : 1,
'GL_TEXTURE_1D' : 1,
'GL_TEXTURE_2D' : 1,
'GL_TEXTURE_BINDING_1D' : 1,
'GL_TEXTURE_BINDING_2D' : 1,
'GL_TEXTURE_BINDING_3D' : 1,
'GL_TEXTURE_COORD_ARRAY' : 1,
'GL_TEXTURE_COORD_ARRAY_SIZE' : 1,
'GL_TEXTURE_COORD_ARRAY_STRIDE' : 1,
'GL_TEXTURE_COORD_ARRAY_TYPE' : 1,
'GL_TEXTURE_ENV_COLOR': 4,
'GL_TEXTURE_ENV_MODE' : 1,
'GL_TEXTURE_GEN_Q' : 1,
'GL_TEXTURE_GEN_R' : 1,
'GL_TEXTURE_GEN_S' : 1,
'GL_TEXTURE_GEN_T' : 1,
'GL_TEXTURE_MATRIX': 16,
'GL_TEXTURE_STACK_DEPTH' : 1,
'GL_UNPACK_ALIGNMENT' : 1,
'GL_UNPACK_LSB_FIRST' : 1,
'GL_UNPACK_ROW_LENGTH' : 1,
'GL_UNPACK_SKIP_PIXELS' : 1,
'GL_UNPACK_SKIP_ROWS' : 1,
'GL_UNPACK_SWAP_BYTES' : 1,
'GL_VERTEX_ARRAY' : 1,
'GL_VERTEX_ARRAY_SIZE' : 1,
'GL_VERTEX_ARRAY_STRIDE' : 1,
'GL_VERTEX_ARRAY_TYPE' : 1,
'GL_VIEWPORT': 4,
'GL_ZOOM_X' : 1,
'GL_ZOOM_Y' : 1,
#GL_ARB_IMAGING which is part of 1.2.1
'GL_COLOR_MATRIX' : 16,
'GL_COLOR_MATRIX_STACK_DEPTH' : 1,
'GL_COLOR_TABLE' : 1,
'GL_POST_CONVOLUTION_COLOR_TABLE' : 1,
'GL_POST_COLOR_MATRIX_COLOR_TABLE' : 1,
'GL_PROXY_COLOR_TABLE' : 1,
'GL_CONVOLUTION_1D' : 1,
'GL_CONVOLUTION_2D' : 1,
'GL_SEPARABLE_2D' : 1,
'GL_POST_CONVOLUTION_RED_SCALE' : 1,
'GL_POST_CONVOLUTION_GREEN_SCALE' : 1,
'GL_POST_CONVOLUTION_BLUE_SCALE' : 1,
'GL_POST_CONVOLUTION_ALPHA_SCALE' : 1,
'GL_POST_CONVOLUTION_RED_BIAS' : 1,
'GL_POST_CONVOLUTION_GREEN_BIAS' : 1,
'GL_POST_CONVOLUTION_BLUE_BIAS' : 1,
'GL_POST_CONVOLUTION_ALPHA_BIAS' : 1,
'GL_HISTOGRAM' : 1,
'GL_MINMAX' : 1,
'GL_MAX_COLOR_MATRIX_STACK_DEPTH' : 1,
'GL_MAX_CONVOLUTION_WIDTH' : 1,
'GL_MAX_CONVOLUTION_HEIGHT' : 1,
}
extensions_num_get_values = {
'GL_BLEND_COLOR_EXT': (4, 'CR_EXT_blend_color'),
'GL_BLEND_EQUATION_EXT': (1, 'CR_EXT_blend_minmax'),
'GL_BLEND_SRC_RGB_EXT': (1, 'CR_EXT_blend_func_separate'),
'GL_BLEND_DST_RGB_EXT': (1, 'CR_EXT_blend_func_separate'),
'GL_BLEND_SRC_ALPHA_EXT': (1, 'CR_EXT_blend_func_separate'),
'GL_BLEND_DST_ALPHA_EXT': (1, 'CR_EXT_blend_func_separate'),
'GL_FOG_DISTANCE_MODE_NV': (1, 'CR_NV_fog_distance'),
'GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB': (1, 'CR_ARB_texture_cube_map'),
'GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT': (1, 'CR_EXT_texture_filter_anisotropic'),
'GL_TEXTURE_BINDING_CUBE_MAP_ARB': (1, 'CR_ARB_texture_cube_map'),
'GL_TEXTURE_CUBE_MAP_ARB': (1, 'CR_ARB_texture_cube_map'),
'GL_ACTIVE_TEXTURE_ARB': (1, 'CR_ARB_multitexture'),
'GL_CLIENT_ACTIVE_TEXTURE_ARB': (1, 'CR_ARB_multitexture'),
'GL_MAX_TEXTURE_UNITS_ARB': (1, 'CR_ARB_multitexture'),
'GL_NUM_GENERAL_COMBINERS_NV': (1, 'CR_NV_register_combiners'),
'GL_MAX_GENERAL_COMBINERS_NV': (1, 'CR_NV_register_combiners'),
'GL_COLOR_SUM_CLAMP_NV': (1, 'CR_NV_register_combiners'),
'GL_CONSTANT_COLOR0_NV': (4, 'CR_NV_register_combiners'),
'GL_CONSTANT_COLOR1_NV': (4, 'CR_NV_register_combiners'),
'GL_PER_STAGE_CONSTANTS_NV': (1, 'CR_NV_register_combiners2'),
'GL_LIGHT_MODEL_COLOR_CONTROL_EXT': (1, 'CR_EXT_separate_specular_color'),
'GL_COLOR_SUM_EXT': (1, 'CR_EXT_secondary_color'),
'GL_CURRENT_SECONDARY_COLOR_EXT': (4, 'CR_EXT_secondary_color'),
'GL_SECONDARY_COLOR_ARRAY_SIZE_EXT': (1, 'CR_EXT_secondary_color'),
'GL_SECONDARY_COLOR_ARRAY_TYPE_EXT': (1, 'CR_EXT_secondary_color'),
'GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT': (1, 'CR_EXT_secondary_color'),
'GL_RESCALE_NORMAL': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_NUM_COMPRESSED_TEXTURE_FORMATS': (1, 'CR_ARB_texture_compression'),
'GL_TEXTURE_3D': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_LIGHT_MODEL_COLOR_CONTROL': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_UNPACK_IMAGE_HEIGHT': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_UNPACK_SKIP_IMAGES': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_PACK_IMAGE_HEIGHT': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_PACK_SKIP_IMAGES': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_ALIASED_POINT_SIZE_RANGE': (2, 'CR_OPENGL_VERSION_1_2'),
'GL_ALIASED_LINE_WIDTH_RANGE': (2, 'CR_OPENGL_VERSION_1_2'),
'GL_MAX_ELEMENTS_INDICES': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_MAX_ELEMENTS_VERTICES': (1, 'CR_OPENGL_VERSION_1_2'),
'GL_MULTISAMPLE_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_ALPHA_TO_COVERAGE_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_ALPHA_TO_ONE_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_COVERAGE_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_BUFFERS_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLES_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_COVERAGE_VALUE_ARB': (1, 'CR_ARB_multisample'),
'GL_SAMPLE_COVERAGE_INVERT_ARB': (1, 'CR_ARB_multisample'),
'GL_POINT_SPRITE_ARB': (1, 'CR_ARB_point_sprite'),
'GL_MAX_TEXTURE_LOD_BIAS_EXT': (1, 'CR_EXT_texture_lod_bias'),
'GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB': (1, 'CR_ARB_texture_compression'),
'GL_PROGRAM_ERROR_POSITION_NV': (1, 'CR_NV_vertex_program'),
'GL_VERTEX_PROGRAM_BINDING_NV': (1, 'CR_NV_vertex_program'),
'GL_MAX_VERTEX_ATTRIBS_ARB': (1, 'CR_ARB_vertex_program'),
'GL_MAX_TEXTURE_COORDS_ARB': (1, 'CR_ARB_vertex_program'),
'GL_PROGRAM_ERROR_POSITION_NV': (1, 'CR_NV_fragment_program'),
'GL_FRAGMENT_PROGRAM_BINDING_NV': (1, 'CR_NV_fragment_program'),
'GL_MAX_RECTANGLE_TEXTURE_SIZE_NV': (1, 'CR_NV_texture_rectangle'),
'GL_TEXTURE_RECTANGLE_NV': (1, 'CR_NV_texture_rectangle'),
'GL_TEXTURE_BINDING_RECTANGLE_NV': (1, 'CR_NV_texture_rectangle'),
'GL_CLIP_VOLUME_CLIPPING_HINT_EXT' : (3, 'CR_EXT_clip_volume_hint'),
'GL_RASTER_POSITION_UNCLIPPED_IBM' : (1, 'CR_IBM_rasterpos_clip'),
'GL_GENERATE_MIPMAP_HINT_SGIS' : (1, 'CR_SGIS_generate_mipmap'),
'GL_CURRENT_FOG_COORDINATE_EXT' : (1, 'CR_EXT_fog_coord'),
'GL_FOG_COORDINATE_ARRAY_TYPE_EXT' : (1, 'CR_EXT_fog_coord'),
'GL_FOG_COORDINATE_ARRAY_STRIDE_EXT' : (1, 'CR_EXT_fog_coord'),
'GL_TRANSPOSE_COLOR_MATRIX_ARB': (16, 'CR_ARB_transpose_matrix'),
'GL_TRANSPOSE_MODELVIEW_MATRIX_ARB': (16, 'CR_ARB_transpose_matrix'),
'GL_TRANSPOSE_PROJECTION_MATRIX_ARB': (16, 'CR_ARB_transpose_matrix'),
'GL_TRANSPOSE_TEXTURE_MATRIX_ARB': (16, 'CR_ARB_transpose_matrix'),
'GL_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_VERTEX_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_NORMAL_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_COLOR_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_INDEX_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB': (1, 'CR_ARB_vertex_buffer_object'),
'GL_MAX_TEXTURE_IMAGE_UNITS_ARB': (1, 'CR_ARB_fragment_program'),
# We don't support GL_ARB_draw_buffers, but for some reason ubuntu64 8.10 vm queries it on macos host
'GL_MAX_DRAW_BUFFERS_ARB': (1, 'VBOX'),
'GL_MAX_PROGRAM_MATRICES_ARB': (1, 'CR_ARB_vertex_program'),
'GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB': (1, 'CR_ARB_vertex_program'),
# Vertex shaders (2.0) #
'GL_MAX_VERTEX_UNIFORM_COMPONENTS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_VARYING_FLOATS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_VERTEX_ATTRIBS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_TEXTURE_IMAGE_UNITS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_MAX_TEXTURE_COORDS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_VERTEX_PROGRAM_POINT_SIZE': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_VERTEX_PROGRAM_TWO_SIDE': (1, 'CR_OPENGL_VERSION_2_0'),
# Fragment shaders (2.0) #
'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_FRAGMENT_SHADER_DERIVATIVE_HINT': (1, 'CR_OPENGL_VERSION_2_0'),
# Draw buffers (2.0) #
'GL_MAX_DRAW_BUFFERS': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER0': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER1': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER2': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER3': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER4': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER5': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER6': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER7': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER8': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER9': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER10': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER11': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER12': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER13': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER14': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_DRAW_BUFFER15': (1, 'CR_OPENGL_VERSION_2_0'),
# Point sprite (2.0) #
'GL_POINT_SPRITE': (1, 'CR_OPENGL_VERSION_2_0'),
# Separate stencil (2.0) #
'GL_STENCIL_BACK_FUNC': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_STENCIL_BACK_REF': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_STENCIL_BACK_VALUE_MASK': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_STENCIL_BACK_FAIL': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_STENCIL_BACK_PASS_DEPTH_FAIL': (1, 'CR_OPENGL_VERSION_2_0'),
'GL_STENCIL_BACK_PASS_DEPTH_PASS': (1, 'CR_OPENGL_VERSION_2_0'),
# Frame buffer object EXT #
'GL_FRAMEBUFFER_BINDING_EXT': (1, 'CR_EXT_framebuffer_object'),
'GL_RENDERBUFFER_BINDING_EXT': (1, 'CR_EXT_framebuffer_object'),
'GL_MAX_COLOR_ATTACHMENTS_EXT': (1, 'CR_EXT_framebuffer_object'),
'GL_MAX_RENDERBUFFER_SIZE_EXT': (1, 'CR_EXT_framebuffer_object'),
# ARB_shader_objects
'GL_CURRENT_PROGRAM': (1, 'CR_ARB_shader_objects'),
# EXT_framebuffer_blit
'GL_READ_FRAMEBUFFER_BINDING_EXT': (1, 'CR_EXT_framebuffer_blit'),
'GL_DRAW_FRAMEBUFFER_BINDING_EXT': (1, 'CR_EXT_framebuffer_blit'),
# EXT_stencil_two_side
'GL_ACTIVE_STENCIL_FACE_EXT': (1, 'CR_EXT_stencil_two_side'),
}
get_keys = num_get_values.keys() + extensions_num_get_values.keys()
get_keys.sort()
print "struct nv_struct { GLenum pname; unsigned int num_values; } num_values_array[] = {"
for key in get_keys:
try:
print '\t{ %s, %d },' % (key, num_get_values[key])
except KeyError:
(nv, ifdef) = extensions_num_get_values[key]
print '#ifdef %s' % ifdef
print '\t{ %s, %d },' % (key, nv)
print '#endif /* %s */' % ifdef
print "\t{ 0, 0 }"
print "};"
print """
static unsigned int __numValues( GLenum pname )
{
struct nv_struct *temp;
for (temp = num_values_array; temp->num_values != 0 ; temp++)
{
if (temp->pname == pname)
return temp->num_values;
}
crDebug( "Invalid pname to __numValues: 0x%x\\n", (int) pname );
return 0;
}
"""
| gpl-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/frame/test_nonunique_indexes.py | 2 | 18038 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameNonuniqueIndexes(TestData):
def test_column_dups_operations(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = list(range(2))
df = DataFrame(arr, columns=["A", "A"])
df.columns = idx
expected = DataFrame(arr, columns=idx)
check(df, expected)
idx = date_range("20130101", periods=4, freq="Q-NOV")
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=["a", "a", "a", "a"]
)
df.columns = idx
expected = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
check(df, expected)
# insert
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=["foo", "bar", "foo", "hello"],
)
df["string"] = "bah"
expected = DataFrame(
[[1, 1, 1, 5, "bah"], [1, 1, 2, 5, "bah"], [2, 1, 3, 5, "bah"]],
columns=["foo", "bar", "foo", "hello", "string"],
)
check(df, expected)
with pytest.raises(ValueError, match="Length of value"):
df.insert(0, "AnotherColumn", range(len(df.index) - 1))
# insert same dtype
df["foo2"] = 3
expected = DataFrame(
[[1, 1, 1, 5, "bah", 3], [1, 1, 2, 5, "bah", 3], [2, 1, 3, 5, "bah", 3]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
check(df, expected)
# set (non-dup)
df["foo2"] = 4
expected = DataFrame(
[[1, 1, 1, 5, "bah", 4], [1, 1, 2, 5, "bah", 4], [2, 1, 3, 5, "bah", 4]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
check(df, expected)
df["foo2"] = 3
# delete (non dup)
del df["bar"]
expected = DataFrame(
[[1, 1, 5, "bah", 3], [1, 2, 5, "bah", 3], [2, 3, 5, "bah", 3]],
columns=["foo", "foo", "hello", "string", "foo2"],
)
check(df, expected)
# try to delete again (its not consolidated)
del df["hello"]
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
check(df, expected)
# consolidate
df = df._consolidate()
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
check(df, expected)
# insert
df.insert(2, "new_col", 5.0)
expected = DataFrame(
[[1, 1, 5.0, "bah", 3], [1, 2, 5.0, "bah", 3], [2, 3, 5.0, "bah", 3]],
columns=["foo", "foo", "new_col", "string", "foo2"],
)
check(df, expected)
# insert a dup
with pytest.raises(ValueError, match="cannot insert"):
df.insert(2, "new_col", 4.0)
df.insert(2, "new_col", 4.0, allow_duplicates=True)
expected = DataFrame(
[
[1, 1, 4.0, 5.0, "bah", 3],
[1, 2, 4.0, 5.0, "bah", 3],
[2, 3, 4.0, 5.0, "bah", 3],
],
columns=["foo", "foo", "new_col", "new_col", "string", "foo2"],
)
check(df, expected)
# delete (dup)
del df["foo"]
expected = DataFrame(
[[4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3]],
columns=["new_col", "new_col", "string", "foo2"],
)
assert_frame_equal(df, expected)
# dup across dtypes
df = DataFrame(
[[1, 1, 1.0, 5], [1, 1, 2.0, 5], [2, 1, 3.0, 5]],
columns=["foo", "bar", "foo", "hello"],
)
check(df)
df["foo2"] = 7.0
expected = DataFrame(
[[1, 1, 1.0, 5, 7.0], [1, 1, 2.0, 5, 7.0], [2, 1, 3.0, 5, 7.0]],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
check(df, expected)
result = df["foo"]
expected = DataFrame([[1, 1.0], [1, 2.0], [2, 3.0]], columns=["foo", "foo"])
check(result, expected)
# multiple replacements
df["foo"] = "string"
expected = DataFrame(
[
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
check(df, expected)
del df["foo"]
expected = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "hello", "foo2"]
)
check(df, expected)
# values
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=["x", "x"])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
assert (result == expected).all().all()
# rename, GH 4403
df4 = DataFrame(
{"RT": [0.0454], "TClose": [22.02], "TExg": [0.0422]},
index=MultiIndex.from_tuples(
[(600809, 20130331)], names=["STK_ID", "RPT_Date"]
),
)
df5 = DataFrame(
{
"RPT_Date": [20120930, 20121231, 20130331],
"STK_ID": [600809] * 3,
"STK_Name": ["饡驦", "饡驦", "饡驦"],
"TClose": [38.05, 41.66, 30.01],
},
index=MultiIndex.from_tuples(
[(600809, 20120930), (600809, 20121231), (600809, 20130331)],
names=["STK_ID", "RPT_Date"],
),
)
k = pd.merge(df4, df5, how="inner", left_index=True, right_index=True)
result = k.rename(columns={"TClose_x": "TClose", "TClose_y": "QT_Close"})
str(result)
result.dtypes
expected = DataFrame(
[[0.0454, 22.02, 0.0422, 20130331, 600809, "饡驦", 30.01]],
columns=[
"RT",
"TClose",
"TExg",
"RPT_Date",
"STK_ID",
"STK_Name",
"QT_Close",
],
).set_index(["STK_ID", "RPT_Date"], drop=False)
assert_frame_equal(result, expected)
# reindex is invalid!
df = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df.reindex(columns=["bar"])
with pytest.raises(ValueError, match=msg):
df.reindex(columns=["bar", "foo"])
# drop
df = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
)
result = df.drop(["a"], axis=1)
expected = DataFrame([[1], [1], [1]], columns=["bar"])
check(result, expected)
result = df.drop("a", axis=1)
check(result, expected)
# describe
df = DataFrame(
[[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=["bar", "a", "a"],
dtype="float64",
)
result = df.describe()
s = df.iloc[:, 0].describe()
expected = pd.concat([s, s, s], keys=df.columns, axis=1)
check(result, expected)
# check column dups with index equal and not equal to df's index
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "A"],
)
for index in [df.index, pd.Index(list("edcba"))]:
this_df = df.copy()
expected_ser = pd.Series(index.values, index=this_df.index)
expected_df = DataFrame(
{"A": expected_ser, "B": this_df["B"], "A": expected_ser},
columns=["A", "B", "A"],
)
this_df["A"] = index
check(this_df, expected_df)
# operations
for op in ["__add__", "__mul__", "__sub__", "__truediv__"]:
df = DataFrame(dict(A=np.arange(10), B=np.random.rand(10)))
expected = getattr(df, op)(df)
expected.columns = ["A", "A"]
df.columns = ["A", "A"]
result = getattr(df, op)(df)
check(result, expected)
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5, 2), columns=["that", "that"])
expected = DataFrame(1.0, index=range(5), columns=["that", "that"])
df["that"] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5, 2), columns=["that", "that"])
expected = DataFrame(1, index=range(5), columns=["that", "that"])
df["that"] = 1
check(df, expected)
def test_column_dups2(self):
# drop buggy GH 6240
df = DataFrame(
{
"A": np.random.randn(5),
"B": np.random.randn(5),
"C": np.random.randn(5),
"D": ["a", "b", "c", "d", "e"],
}
)
expected = df.take([0, 1, 1], axis=1)
df2 = df.take([2, 0, 1, 2, 1], axis=1)
result = df2.drop("C", axis=1)
assert_frame_equal(result, expected)
# dropna
df = DataFrame(
{
"A": np.random.randn(5),
"B": np.random.randn(5),
"C": np.random.randn(5),
"D": ["a", "b", "c", "d", "e"],
}
)
df.iloc[2, [0, 1, 2]] = np.nan
df.iloc[0, 0] = np.nan
df.iloc[1, 1] = np.nan
df.iloc[:, 3] = np.nan
expected = df.dropna(subset=["A", "B", "C"], how="all")
expected.columns = ["A", "A", "B", "C"]
df.columns = ["A", "A", "B", "C"]
result = df.dropna(subset=["A", "C"], how="all")
assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# boolean indexing
# GH 4879
dups = ["A", "A", "C", "D"]
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
expected = df[df.C > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
result = df[df.C > 6]
check(result, expected)
# where
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
expected = df[df > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
result = df[df > 6]
check(result, expected)
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df[df.A > 6]
# dup aligning operations should work
# GH 5185
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3])
result = df1.sub(df2)
assert_frame_equal(result, expected)
# equality
df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]], columns=["A", "B"])
df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]], columns=["A", "A"])
# not-comparing like-labelled
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
df1 == df2
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame(
[[False, True], [True, False], [False, False], [True, False]],
columns=["A", "A"],
)
assert_frame_equal(result, expected)
# mixed column selection
# GH 5639
dfbool = DataFrame(
{
"one": Series([True, True, False], index=["a", "b", "c"]),
"two": Series([False, False, True, False], index=["a", "b", "c", "d"]),
"three": Series([False, True, True, True], index=["a", "b", "c", "d"]),
}
)
expected = pd.concat([dfbool["one"], dfbool["three"], dfbool["one"]], axis=1)
result = dfbool[["one", "three", "one"]]
check(result, expected)
# multi-axis dups
# GH 6121
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]].copy()
expected = z.loc[["a", "c", "a"]]
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]]
result = z.loc[["a", "c", "a"]]
check(result, expected)
def test_column_dups_indexing2(self):
# GH 8363
# datetime ops with a non-unique index
df = DataFrame(
{"A": np.arange(5, dtype="int64"), "B": np.arange(1, 6, dtype="int64")},
index=[2, 2, 3, 3, 4],
)
result = df.B - df.A
expected = Series(1, index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
df = DataFrame(
{
"A": date_range("20130101", periods=5),
"B": date_range("20130101 09:00:00", periods=5),
},
index=[2, 2, 3, 3, 4],
)
result = df.B - df.A
expected = Series(pd.Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["a", "a.1"]
str(df)
expected = DataFrame([[1, 2]], columns=["a", "a.1"])
assert_frame_equal(df, expected)
df = DataFrame([[1, 2, 3]], columns=["b", "a", "a"])
df.columns = ["b", "a", "a.1"]
str(df)
expected = DataFrame([[1, 2, 3]], columns=["b", "a", "a.1"])
assert_frame_equal(df, expected)
# with a dup index
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["b", "b"]
str(df)
expected = DataFrame([[1, 2]], columns=["b", "b"])
assert_frame_equal(df, expected)
# multi-dtype
df = DataFrame(
[[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]],
columns=["a", "a", "b", "b", "d", "c", "c"],
)
df.columns = list("ABCDEFG")
str(df)
expected = DataFrame(
[[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("ABCDEFG")
)
assert_frame_equal(df, expected)
df = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a", "a", "a"])
df.columns = ["a", "a.1", "a.2", "a.3"]
str(df)
expected = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a.1", "a.2", "a.3"])
assert_frame_equal(df, expected)
# dups across blocks
df_float = DataFrame(np.random.randn(10, 3), dtype="float64")
df_int = DataFrame(np.random.randn(10, 3), dtype="int64")
df_bool = DataFrame(True, index=df_float.index, columns=df_float.columns)
df_object = DataFrame("foo", index=df_float.index, columns=df_float.columns)
df_dt = DataFrame(
pd.Timestamp("20010101"), index=df_float.index, columns=df_float.columns
)
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
assert len(df._data._blknos) == len(df.columns)
assert len(df._data._blklocs) == len(df.columns)
# testing iloc
for i in range(len(df.columns)):
df.iloc[:, i]
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.0], [2, -2, 3.0]]
rs = DataFrame(vals, columns=["A", "A", "B"])
xp = DataFrame(vals)
xp.columns = ["A", "A", "B"]
assert_frame_equal(rs, xp)
def test_values_duplicates(self):
df = DataFrame(
[[1, 2, "a", "b"], [1, 2, "a", "b"]], columns=["one", "one", "two", "two"]
)
result = df.values
expected = np.array([[1, 2, "a", "b"], [1, 2, "a", "b"]], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_set_value_by_index(self):
# See gh-12344
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = list("AAA")
expected = df.iloc[:, 2]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 2], expected)
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = [2, float(2), str(2)]
expected = df.iloc[:, 1]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 1], expected)
def test_insert_with_columns_dups(self):
# GH 14291
df = pd.DataFrame()
df.insert(0, "A", ["g", "h", "i"], allow_duplicates=True)
df.insert(0, "A", ["d", "e", "f"], allow_duplicates=True)
df.insert(0, "A", ["a", "b", "c"], allow_duplicates=True)
exp = pd.DataFrame(
[["a", "d", "g"], ["b", "e", "h"], ["c", "f", "i"]], columns=["A", "A", "A"]
)
assert_frame_equal(df, exp)
| apache-2.0 |
ZuluPro/libcloud | libcloud/dns/drivers/onapp.py | 10 | 10684 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OnApp DNS Driver
"""
__all__ = [
'OnAppDNSDriver'
]
import json
from libcloud.common.onapp import OnAppConnection
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.base import DNSDriver, Zone, Record
DEFAULT_ZONE_TTL = 1200
class OnAppDNSDriver(DNSDriver):
type = Provider.ONAPP
name = 'OnApp'
website = 'http://onapp.com/'
connectionCls = OnAppConnection
RECORD_TYPE_MAP = {
RecordType.SOA: 'SOA',
RecordType.NS: 'NS',
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.TXT: 'TXT',
RecordType.SRV: 'SRV',
}
def list_zones(self):
"""
Return a list of zones.
:return: ``list`` of :class:`Zone`
"""
response = self.connection.request('/dns_zones.json')
zones = self._to_zones(response.object)
return zones
def get_zone(self, zone_id):
"""
Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
response = self.connection.request('/dns_zones/%s.json' % zone_id)
zone = self._to_zone(response.object)
return zone
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (All zones are master by design).
:type type: ``str``
:param ttl: TTL for new records. (This is not really used)
:type ttl: ``int``
:param extra: Extra attributes (set auto_populate: 0 if you
don't want to auto populate with existing DNS records). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
For more info, please see:
https://docs.onapp.com/display/52API/Add+DNS+Zone
"""
dns_zone = {'name': domain}
if extra is not None:
dns_zone.update(extra)
dns_zone_data = json.dumps({'dns_zone': dns_zone})
response = self.connection.request(
'/dns_zones.json',
method='POST',
headers={"Content-type": "application/json"},
data=dns_zone_data)
zone = self._to_zone(response.object)
return zone
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will also delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
self.connection.request(
'/dns_zones/%s.json' % zone.id,
method='DELETE')
return True
def list_records(self, zone):
"""
Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
response = self.connection.request(
'/dns_zones/%s/records.json' % zone.id)
dns_records = response.object['dns_zone']['records']
records = self._to_records(dns_records, zone)
return records
def get_record(self, zone_id, record_id):
"""
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
response = self.connection.request('/dns_zones/%s/records/%s.json' %
(zone_id, record_id))
record = self._to_record(response.object, zone_id=zone_id)
return record
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
Used only for A and AAAA record types.
:type data: ``str``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Record`
For more info, please see:
https://docs.onapp.com/display/52API/Add+DNS+Record
"""
dns_record = self._format_record(name, type, data, extra)
dns_record_data = json.dumps({'dns_record': dns_record})
response = self.connection.request(
'/dns_zones/%s/records.json' % zone.id,
method='POST',
headers={"Content-type": "application/json"},
data=dns_record_data)
record = self._to_record(response.object, zone=zone)
return record
def update_record(self, record, name, type, data, extra=None):
"""
Update an existing record.
:param record: Record to update.
:type record: :class:`Record`
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
Used only for A and AAAA record types.
:type data: ``str``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: :class:`Record`
For more info, please see:
https://docs.onapp.com/display/52API/Edit+DNS+Records
"""
zone = record.zone
dns_record = self._format_record(name, type, data, extra)
dns_record_data = json.dumps({'dns_record': dns_record})
self.connection.request(
'/dns_zones/%s/records/%s.json' % (zone.id, record.id),
method='PUT',
headers={"Content-type": "application/json"},
data=dns_record_data)
record = self.get_record(zone.id, record.id)
return record
def delete_record(self, record):
"""
Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
For more info, please see:
https://docs.onapp.com/display/52API/Delete+DNS+Record
"""
zone_id = record.zone.id
self.connection.request('/dns_zones/%s/records/%s.json' % (zone_id,
record.id), method='DELETE')
return True
#
# Helper methods
#
def _format_record(self, name, type, data, extra):
        if name == '':
name = '@'
if extra is None:
extra = {}
record_type = self.RECORD_TYPE_MAP[type]
new_record = {
'name': name,
'ttl': extra.get('ttl', DEFAULT_ZONE_TTL),
'type': record_type
}
if type == RecordType.MX:
additions = {
'priority': extra.get('priority', 1),
'hostname': extra.get('hostname')
}
elif type == RecordType.SRV:
additions = {
'port': extra.get('port'),
'weight': extra.get('weight', 1),
'priority': extra.get('priority', 1),
'hostname': extra.get('hostname')
}
elif type == RecordType.A:
additions = {'ip': data}
elif type == RecordType.CNAME:
additions = {'hostname': extra.get('hostname')}
elif type == RecordType.AAAA:
additions = {'ip': data}
elif type == RecordType.TXT:
additions = {'txt': extra.get('txt')}
elif type == RecordType.NS:
additions = {'hostname': extra.get('hostname')}
new_record.update(additions)
return new_record
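    # Illustrative note (not part of the original driver): for an MX record built
    # with name='' and extra={'hostname': 'mail.example.com', 'priority': 10},
    # _format_record above produces roughly:
    #     {'name': '@', 'ttl': 1200, 'type': 'MX',
    #      'priority': 10, 'hostname': 'mail.example.com'}
    # 'mail.example.com' is only a placeholder value.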
def _to_zones(self, data):
zones = []
for zone in data:
_zone = self._to_zone(zone)
zones.append(_zone)
return zones
def _to_zone(self, data):
dns_zone = data.get('dns_zone')
id = dns_zone.get('id')
name = dns_zone.get('name')
extra = {'user_id': dns_zone.get('user_id'),
'cdn_reference': dns_zone.get('cdn_reference'),
'created_at': dns_zone.get('created_at'),
'updated_at': dns_zone.get('updated_at')}
type = 'master'
return Zone(id=id, domain=name, type=type, ttl=DEFAULT_ZONE_TTL,
driver=self, extra=extra)
def _to_records(self, data, zone):
records = []
data = data.values()
for data_type in data:
for item in data_type:
record = self._to_record(item, zone=zone)
records.append(record)
records.sort(key=lambda x: x.id, reverse=False)
return records
def _to_record(self, data, zone_id=None, zone=None):
if not zone: # We need zone_id or zone
zone = self.get_zone(zone_id)
record = data.get('dns_record')
id = record.get('id')
name = record.get('name')
type = record.get('type')
ttl = record.get('ttl', None)
return Record(id=id, name=name, type=type, data=record, zone=zone,
driver=self, ttl=ttl, extra={})
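    # Hedged usage sketch (assumption: the constructor call below follows the
    # usual libcloud key/secret/host pattern; credentials and host are placeholders):
    #
    #     driver = OnAppDNSDriver('user', 'api_key', host='onapp.example.com')
    #     zone = driver.create_zone('example.com')
    #     driver.create_record('www', zone, RecordType.A, '192.0.2.10')
    #     records = driver.list_records(zone)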
| apache-2.0 |
gistic/PublicSpatialImpala | thirdparty/thrift-0.9.0/lib/py/src/TSerialization.py | 184 | 1387 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from protocol import TBinaryProtocol
from transport import TTransport
def serialize(thrift_object,
protocol_factory=TBinaryProtocol.TBinaryProtocolFactory()):
transport = TTransport.TMemoryBuffer()
protocol = protocol_factory.getProtocol(transport)
thrift_object.write(protocol)
return transport.getvalue()
def deserialize(base,
buf,
protocol_factory=TBinaryProtocol.TBinaryProtocolFactory()):
transport = TTransport.TMemoryBuffer(buf)
protocol = protocol_factory.getProtocol(transport)
base.read(protocol)
return base
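# Hedged usage sketch (MyStruct stands in for any Thrift-generated struct class;
# it is not defined in this module):
#
#     original = MyStruct(field=42)
#     buf = serialize(original)                # bytes written through TMemoryBuffer
#     restored = deserialize(MyStruct(), buf)  # reads the buffer back into the object
#     assert restored == original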
| apache-2.0 |
gabrielcnr/pytest | testing/test_pluginmanager.py | 4 | 12014 | import pytest
import py
import os
from _pytest.config import get_config, PytestPluginManager
from _pytest.main import EXIT_NOTESTSCOLLECTED
@pytest.fixture
def pytestpm():
return PytestPluginManager()
class TestPytestPluginInteractions:
def test_addhooks_conftestplugin(self, testdir):
testdir.makepyfile(newhooks="""
def pytest_myhook(xyz):
"new hook"
""")
conf = testdir.makeconftest("""
import sys ; sys.path.insert(0, '.')
import newhooks
def pytest_addhooks(pluginmanager):
pluginmanager.addhooks(newhooks)
def pytest_myhook(xyz):
return xyz + 1
""")
config = get_config()
pm = config.pluginmanager
pm.hook.pytest_addhooks.call_historic(
kwargs=dict(pluginmanager=config.pluginmanager))
config.pluginmanager._importconftest(conf)
#print(config.pluginmanager.get_plugins())
res = config.hook.pytest_myhook(xyz=10)
assert res == [11]
def test_addhooks_nohooks(self, testdir):
testdir.makeconftest("""
import sys
def pytest_addhooks(pluginmanager):
pluginmanager.addhooks(sys)
""")
res = testdir.runpytest()
assert res.ret != 0
res.stderr.fnmatch_lines([
"*did not find*sys*"
])
def test_namespace_early_from_import(self, testdir):
p = testdir.makepyfile("""
from pytest import Item
from pytest import Item as Item2
assert Item is Item2
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_do_ext_namespace(self, testdir):
testdir.makeconftest("""
def pytest_namespace():
return {'hello': 'world'}
""")
p = testdir.makepyfile("""
from pytest import hello
import pytest
def test_hello():
assert hello == "world"
assert 'hello' in pytest.__all__
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
def test_do_option_postinitialize(self, testdir):
config = testdir.parseconfigure()
assert not hasattr(config.option, 'test123')
p = testdir.makepyfile("""
def pytest_addoption(parser):
parser.addoption('--test123', action="store_true",
default=True)
""")
config.pluginmanager._importconftest(p)
assert config.option.test123
def test_configure(self, testdir):
config = testdir.parseconfig()
l = []
class A:
def pytest_configure(self, config):
l.append(self)
config.pluginmanager.register(A())
assert len(l) == 0
config._do_configure()
assert len(l) == 1
config.pluginmanager.register(A()) # leads to a configured() plugin
assert len(l) == 2
assert l[0] != l[1]
config._ensure_unconfigure()
config.pluginmanager.register(A())
assert len(l) == 2
def test_hook_tracing(self):
pytestpm = get_config().pluginmanager # fully initialized with plugins
saveindent = []
class api1:
def pytest_plugin_registered(self):
saveindent.append(pytestpm.trace.root.indent)
class api2:
def pytest_plugin_registered(self):
saveindent.append(pytestpm.trace.root.indent)
raise ValueError()
l = []
pytestpm.trace.root.setwriter(l.append)
undo = pytestpm.enable_tracing()
try:
indent = pytestpm.trace.root.indent
p = api1()
pytestpm.register(p)
assert pytestpm.trace.root.indent == indent
assert len(l) >= 2
assert 'pytest_plugin_registered' in l[0]
assert 'finish' in l[1]
l[:] = []
with pytest.raises(ValueError):
pytestpm.register(api2())
assert pytestpm.trace.root.indent == indent
assert saveindent[0] > indent
finally:
undo()
def test_warn_on_deprecated_multicall(self, pytestpm):
warnings = []
class get_warnings:
def pytest_logwarning(self, message):
warnings.append(message)
class Plugin:
def pytest_configure(self, __multicall__):
pass
pytestpm.register(get_warnings())
before = list(warnings)
pytestpm.register(Plugin())
assert len(warnings) == len(before) + 1
assert "deprecated" in warnings[-1]
def test_warn_on_deprecated_addhooks(self, pytestpm):
warnings = []
class get_warnings:
def pytest_logwarning(self, code, fslocation, message, nodeid):
warnings.append(message)
class Plugin:
def pytest_testhook():
pass
pytestpm.register(get_warnings())
before = list(warnings)
pytestpm.addhooks(Plugin())
assert len(warnings) == len(before) + 1
assert "deprecated" in warnings[-1]
def test_namespace_has_default_and_env_plugins(testdir):
p = testdir.makepyfile("""
import pytest
pytest.mark
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines([
"*tryfirst*first*",
"*trylast*last*",
])
def test_importplugin_issue375(testdir, pytestpm):
testdir.syspathinsert(testdir.tmpdir)
testdir.makepyfile(qwe="import aaaa")
with pytest.raises(ImportError) as excinfo:
pytestpm.import_plugin("qwe")
assert "qwe" not in str(excinfo.value)
assert "aaaa" in str(excinfo.value)
class TestPytestPluginManager:
def test_register_imported_modules(self):
pm = PytestPluginManager()
mod = py.std.types.ModuleType("x.y.pytest_hello")
pm.register(mod)
assert pm.is_registered(mod)
l = pm.get_plugins()
assert mod in l
pytest.raises(ValueError, "pm.register(mod)")
pytest.raises(ValueError, lambda: pm.register(mod))
#assert not pm.is_registered(mod2)
assert pm.get_plugins() == l
def test_canonical_import(self, monkeypatch):
mod = py.std.types.ModuleType("pytest_xyz")
monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod)
pm = PytestPluginManager()
pm.import_plugin('pytest_xyz')
assert pm.get_plugin('pytest_xyz') == mod
assert pm.is_registered(mod)
def test_consider_module(self, testdir, pytestpm):
testdir.syspathinsert()
testdir.makepyfile(pytest_p1="#")
testdir.makepyfile(pytest_p2="#")
mod = py.std.types.ModuleType("temp")
mod.pytest_plugins = ["pytest_p1", "pytest_p2"]
pytestpm.consider_module(mod)
assert pytestpm.get_plugin("pytest_p1").__name__ == "pytest_p1"
assert pytestpm.get_plugin("pytest_p2").__name__ == "pytest_p2"
def test_consider_module_import_module(self, testdir):
pytestpm = get_config().pluginmanager
mod = py.std.types.ModuleType("x")
mod.pytest_plugins = "pytest_a"
aplugin = testdir.makepyfile(pytest_a="#")
reprec = testdir.make_hook_recorder(pytestpm)
#syspath.prepend(aplugin.dirpath())
py.std.sys.path.insert(0, str(aplugin.dirpath()))
pytestpm.consider_module(mod)
call = reprec.getcall(pytestpm.hook.pytest_plugin_registered.name)
assert call.plugin.__name__ == "pytest_a"
# check that it is not registered twice
pytestpm.consider_module(mod)
l = reprec.getcalls("pytest_plugin_registered")
assert len(l) == 1
def test_consider_env_fails_to_import(self, monkeypatch, pytestpm):
monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",")
with pytest.raises(ImportError):
pytestpm.consider_env()
def test_plugin_skip(self, testdir, monkeypatch):
p = testdir.makepyfile(skipping1="""
import pytest
pytest.skip("hello")
""")
p.copy(p.dirpath("skipping2.py"))
monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True)
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines([
"WI1*skipped plugin*skipping1*hello*",
"WI1*skipped plugin*skipping2*hello*",
])
def test_consider_env_plugin_instantiation(self, testdir, monkeypatch, pytestpm):
testdir.syspathinsert()
testdir.makepyfile(xy123="#")
monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123')
l1 = len(pytestpm.get_plugins())
pytestpm.consider_env()
l2 = len(pytestpm.get_plugins())
assert l2 == l1 + 1
assert pytestpm.get_plugin('xy123')
pytestpm.consider_env()
l3 = len(pytestpm.get_plugins())
assert l2 == l3
def test_pluginmanager_ENV_startup(self, testdir, monkeypatch):
testdir.makepyfile(pytest_x500="#")
p = testdir.makepyfile("""
import pytest
def test_hello(pytestconfig):
plugin = pytestconfig.pluginmanager.get_plugin('pytest_x500')
assert plugin is not None
""")
monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",")
result = testdir.runpytest(p, syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_import_plugin_importname(self, testdir, pytestpm):
pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")')
pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwx.y")')
testdir.syspathinsert()
pluginname = "pytest_hello"
testdir.makepyfile(**{pluginname: ""})
pytestpm.import_plugin("pytest_hello")
len1 = len(pytestpm.get_plugins())
pytestpm.import_plugin("pytest_hello")
len2 = len(pytestpm.get_plugins())
assert len1 == len2
plugin1 = pytestpm.get_plugin("pytest_hello")
assert plugin1.__name__.endswith('pytest_hello')
plugin2 = pytestpm.get_plugin("pytest_hello")
assert plugin2 is plugin1
def test_import_plugin_dotted_name(self, testdir, pytestpm):
pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")')
pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwex.y")')
testdir.syspathinsert()
testdir.mkpydir("pkg").join("plug.py").write("x=3")
pluginname = "pkg.plug"
pytestpm.import_plugin(pluginname)
mod = pytestpm.get_plugin("pkg.plug")
assert mod.x == 3
def test_consider_conftest_deps(self, testdir, pytestpm):
mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport()
with pytest.raises(ImportError):
pytestpm.consider_conftest(mod)
class TestPytestPluginManagerBootstrapping:
def test_preparse_args(self, pytestpm):
pytest.raises(ImportError, lambda:
pytestpm.consider_preparse(["xyz", "-p", "hello123"]))
def test_plugin_prevent_register(self, pytestpm):
pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
l1 = pytestpm.get_plugins()
pytestpm.register(42, name="abc")
l2 = pytestpm.get_plugins()
assert len(l2) == len(l1)
assert 42 not in l2
def test_plugin_prevent_register_unregistered_alredy_registered(self, pytestpm):
pytestpm.register(42, name="abc")
l1 = pytestpm.get_plugins()
assert 42 in l1
pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
l2 = pytestpm.get_plugins()
assert 42 not in l2
| mit |
dzbarsky/servo | components/script/dom/bindings/codegen/parser/tests/test_special_methods_uniqueness.py | 241 | 1433 | import WebIDL
def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface SpecialMethodUniqueness1 {
getter deleter boolean (DOMString name);
getter boolean (DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodUniqueness1 {
deleter boolean (DOMString name);
getter deleter boolean (DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodUniqueness1 {
setter creator boolean (DOMString name);
creator boolean (DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodUniqueness1 {
setter boolean (DOMString name);
creator setter boolean (DOMString name);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
| mpl-2.0 |
linktlh/Toontown-journey | toontown/coghq/LaserGameRoll.py | 4 | 1977 | import random
from direct.distributed import ClockDelta
from direct.task import Task
from toontown.coghq import LaserGameBase
class LaserGameRoll(LaserGameBase.LaserGameBase):
def __init__(self, funcSuccess, funcFail, funcSendGrid, funcSetGrid):
LaserGameBase.LaserGameBase.__init__(self, funcSuccess, funcFail, funcSendGrid, funcSetGrid)
self.setGridSize(5, 5)
self.blankGrid()
def win(self):
if not self.finshed:
self.blankGrid()
self.funcSendGrid()
LaserGameBase.LaserGameBase.win(self)
def lose(self):
self.blankGrid()
self.funcSendGrid()
LaserGameBase.LaserGameBase.lose(self)
def startGrid(self):
LaserGameBase.LaserGameBase.startGrid(self)
for column in xrange(0, self.gridNumX):
for row in xrange(0, self.gridNumY):
tile = random.choice([
10,
13])
self.gridData[column][row] = tile
for column in xrange(0, self.gridNumX):
self.gridData[column][self.gridNumY - 1] = 12
def hit(self, hitX, hitY, oldx = -1, oldy = -1):
if self.finshed:
return None
if self.gridData[hitX][hitY] == 10:
self.gridData[hitX][hitY] = 13
elif self.gridData[hitX][hitY] == 13:
self.gridData[hitX][hitY] = 10
if self.checkForWin():
self.win()
else:
self.funcSendGrid()
def checkForWin(self):
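        # Descriptive note: the puzzle is solved only when the whole grid is a
        # single tile type; count1 counts tiles equal to 10 and count2 counts
        # tiles equal to 13, so a mix of both means the game is not yet won.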
count1 = 0
count2 = 0
for column in xrange(0, self.gridNumX):
for row in xrange(0, self.gridNumY):
if self.gridData[column][row] == 10:
count1 += 1
continue
if self.gridData[column][row] == 13:
count2 += 1
continue
if count1 and count2:
return 0
else:
return 1
| apache-2.0 |
framon/samba | buildtools/wafadmin/Runner.py | 16 | 5556 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"Execute the tasks"
import os, sys, random, time, threading, traceback
try: from Queue import Queue
except ImportError: from queue import Queue
import Build, Utils, Logs, Options
from Logs import debug, error
from Constants import *
GAP = 15
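# GAP is the scheduling slack used in Parallel.refill_task_list below: the
# producer keeps at most numjobs + GAP tasks in flight before blocking to
# collect finished tasks from the output queue.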
run_old = threading.Thread.run
def run(*args, **kwargs):
try:
run_old(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except:
sys.excepthook(*sys.exc_info())
threading.Thread.run = run
def process_task(tsk):
m = tsk.master
if m.stop:
m.out.put(tsk)
return
try:
tsk.generator.bld.printout(tsk.display())
if tsk.__class__.stat: ret = tsk.__class__.stat(tsk)
# actual call to task's run() function
else: ret = tsk.call_run()
except Exception, e:
tsk.err_msg = Utils.ex_stack()
tsk.hasrun = EXCEPTION
# TODO cleanup
m.error_handler(tsk)
m.out.put(tsk)
return
if ret:
tsk.err_code = ret
tsk.hasrun = CRASHED
else:
try:
tsk.post_run()
except Utils.WafError:
pass
except Exception:
tsk.err_msg = Utils.ex_stack()
tsk.hasrun = EXCEPTION
else:
tsk.hasrun = SUCCESS
if tsk.hasrun != SUCCESS:
m.error_handler(tsk)
m.out.put(tsk)
class TaskConsumer(threading.Thread):
ready = Queue(0)
consumers = []
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(1)
self.start()
def run(self):
try:
self.loop()
except:
pass
def loop(self):
while 1:
tsk = TaskConsumer.ready.get()
process_task(tsk)
class Parallel(object):
"""
keep the consumer threads busy, and avoid consuming cpu cycles
when no more tasks can be added (end of the build, etc)
"""
def __init__(self, bld, j=2):
# number of consumers
self.numjobs = j
self.manager = bld.task_manager
self.manager.current_group = 0
self.total = self.manager.total()
# tasks waiting to be processed - IMPORTANT
self.outstanding = []
self.maxjobs = MAXJOBS
# tasks that are awaiting for another task to complete
self.frozen = []
# tasks returned by the consumers
self.out = Queue(0)
self.count = 0 # tasks not in the producer area
self.processed = 1 # progress indicator
self.stop = False # error condition to stop the build
self.error = False # error flag
def get_next(self):
"override this method to schedule the tasks in a particular order"
if not self.outstanding:
return None
return self.outstanding.pop(0)
def postpone(self, tsk):
"override this method to schedule the tasks in a particular order"
# TODO consider using a deque instead
if random.randint(0, 1):
self.frozen.insert(0, tsk)
else:
self.frozen.append(tsk)
def refill_task_list(self):
"called to set the next group of tasks"
while self.count > self.numjobs + GAP or self.count >= self.maxjobs:
self.get_out()
while not self.outstanding:
if self.count:
self.get_out()
if self.frozen:
self.outstanding += self.frozen
self.frozen = []
elif not self.count:
(jobs, tmp) = self.manager.get_next_set()
if jobs != None: self.maxjobs = jobs
if tmp: self.outstanding += tmp
break
def get_out(self):
"the tasks that are put to execute are all collected using get_out"
ret = self.out.get()
self.manager.add_finished(ret)
if not self.stop and getattr(ret, 'more_tasks', None):
self.outstanding += ret.more_tasks
self.total += len(ret.more_tasks)
self.count -= 1
def error_handler(self, tsk):
"by default, errors make the build stop (not thread safe so be careful)"
if not Options.options.keep:
self.stop = True
self.error = True
def start(self):
"execute the tasks"
if TaskConsumer.consumers:
# the worker pool is usually loaded lazily (see below)
# in case it is re-used with a different value of numjobs:
while len(TaskConsumer.consumers) < self.numjobs:
TaskConsumer.consumers.append(TaskConsumer())
while not self.stop:
self.refill_task_list()
# consider the next task
tsk = self.get_next()
if not tsk:
if self.count:
# tasks may add new ones after they are run
continue
else:
# no tasks to run, no tasks running, time to exit
break
if tsk.hasrun:
# if the task is marked as "run", just skip it
self.processed += 1
self.manager.add_finished(tsk)
continue
try:
st = tsk.runnable_status()
except Exception, e:
self.processed += 1
if self.stop and not Options.options.keep:
tsk.hasrun = SKIPPED
self.manager.add_finished(tsk)
continue
self.error_handler(tsk)
self.manager.add_finished(tsk)
tsk.hasrun = EXCEPTION
tsk.err_msg = Utils.ex_stack()
continue
if st == ASK_LATER:
self.postpone(tsk)
elif st == SKIP_ME:
self.processed += 1
tsk.hasrun = SKIPPED
self.manager.add_finished(tsk)
else:
# run me: put the task in ready queue
tsk.position = (self.processed, self.total)
self.count += 1
tsk.master = self
self.processed += 1
if self.numjobs == 1:
process_task(tsk)
else:
TaskConsumer.ready.put(tsk)
# create the consumer threads only if there is something to consume
if not TaskConsumer.consumers:
TaskConsumer.consumers = [TaskConsumer() for i in xrange(self.numjobs)]
# self.count represents the tasks that have been made available to the consumer threads
# collect all the tasks after an error else the message may be incomplete
while self.error and self.count:
self.get_out()
#print loop
assert (self.count == 0 or self.stop)
| gpl-3.0 |
mchasal/compose | compose/project.py | 15 | 12276 | from __future__ import unicode_literals
from __future__ import absolute_import
from functools import reduce
import logging
from docker.errors import APIError
from .config import get_service_name_from_net, ConfigurationError
from .const import DEFAULT_TIMEOUT, LABEL_PROJECT, LABEL_SERVICE, LABEL_ONE_OFF
from .container import Container
from .legacy import check_for_legacy_containers
from .service import Service
from .utils import parallel_execute
log = logging.getLogger(__name__)
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in service.get('volumes_from', []) or
name == get_service_name_from_net(service.get('net')))
]
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
else:
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
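# Illustrative sketch (hypothetical service dicts, not taken from a real project):
# given [{'name': 'web', 'links': ['db:database']}, {'name': 'db'}],
# sort_service_dicts returns the 'db' dict before the 'web' dict, so a service
# always comes after the services it depends on.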
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client):
self.name = name
self.services = services
self.client = client
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@classmethod
def from_dicts(cls, name, service_dicts, client):
"""
        Construct a Project from a list of dicts representing services.
"""
project = cls(name, [], client)
for service_dict in sort_service_dicts(service_dicts):
links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict)
project.services.append(Service(client=client, project=name, links=links, net=net,
volumes_from=volumes_from, **service_dict))
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
return self.get_services(
service_names=self.service_names,
include_deps=include_deps
)
else:
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
uniques = []
[uniques.append(s) for s in services if s not in uniques]
return uniques
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_volumes_from(self, service_dict):
volumes_from = []
if 'volumes_from' in service_dict:
for volume_name in service_dict.get('volumes_from', []):
try:
service = self.get_service(volume_name)
volumes_from.append(service)
except NoSuchService:
try:
container = Container.from_id(self.client, volume_name)
volumes_from.append(container)
except APIError:
raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' % (service_dict['name'], volume_name))
del service_dict['volumes_from']
return volumes_from
def get_net(self, service_dict):
if 'net' in service_dict:
net_name = get_service_name_from_net(service_dict.get('net'))
if net_name:
try:
net = self.get_service(net_name)
except NoSuchService:
try:
net = Container.from_id(self.client, net_name)
except APIError:
raise ConfigurationError('Service "%s" is trying to use the network of "%s", which is not the name of a service or container.' % (service_dict['name'], net_name))
else:
net = service_dict['net']
del service_dict['net']
else:
net = None
return net
def start(self, service_names=None, **options):
for service in self.get_services(service_names):
service.start(**options)
def stop(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.stop(**options),
msg_index=lambda c: c.name,
msg="Stopping"
)
def kill(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.kill(**options),
msg_index=lambda c: c.name,
msg="Killing"
)
def remove_stopped(self, service_names=None, **options):
all_containers = self.containers(service_names, stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
parallel_execute(
objects=stopped_containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def restart(self, service_names=None, **options):
for service in self.get_services(service_names):
service.restart(**options)
def build(self, service_names=None, no_cache=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache)
else:
log.info('%s uses an image, skipping' % service.name)
def up(self,
service_names=None,
start_deps=True,
allow_recreate=True,
force_recreate=False,
do_build=True,
timeout=DEFAULT_TIMEOUT):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
services = self.get_services(service_names, include_deps=start_deps)
for service in services:
service.remove_duplicate_containers()
plans = self._get_convergence_plans(
services,
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
return [
container
for service in services
for container in service.execute_convergence_plan(
plans[service.name],
do_build=do_build,
timeout=timeout
)
]
def _get_convergence_plans(self,
services,
allow_recreate=True,
force_recreate=False):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action == 'recreate'
]
if updated_dependencies and allow_recreate:
log.debug(
'%s has upstream changes (%s)',
service.name, ", ".join(updated_dependencies),
)
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=True,
)
else:
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
plans[service.name] = plan
return plans
def pull(self, service_names=None):
for service in self.get_services(service_names, include_deps=True):
service.pull()
def containers(self, service_names=None, stopped=False, one_off=False):
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
if not containers:
check_for_legacy_containers(
self.client,
self.name,
self.service_names,
)
return filter(matches_service_names, containers)
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class DependencyError(ConfigurationError):
pass
| apache-2.0 |
mahendra-r/edx-platform | lms/djangoapps/certificates/tests/test_queue.py | 43 | 5953 | # -*- coding: utf-8 -*-
"""Tests for the XQueue certificates interface. """
from contextlib import contextmanager
import json
from mock import patch, Mock
from nose.plugins.attrib import attr
from django.test import TestCase
from django.test.utils import override_settings
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.factories import CourseFactory
# It is really unfortunate that we are using the XQueue client
# code from the capa library. In the future, we should move this
# into a shared library. We import it here so we can mock it
# and verify that items are being correctly added to the queue
# in our `XQueueCertInterface` implementation.
from capa.xqueue_interface import XQueueInterface
from certificates.queue import XQueueCertInterface
from certificates.models import (
ExampleCertificateSet,
ExampleCertificate,
GeneratedCertificate,
CertificateStatuses,
)
@attr('shard_1')
@override_settings(CERT_QUEUE='certificates')
class XQueueCertInterfaceAddCertificateTest(ModuleStoreTestCase):
"""Test the "add to queue" operation of the XQueue interface. """
def setUp(self):
super(XQueueCertInterfaceAddCertificateTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
self.enrollment = CourseEnrollmentFactory(
user=self.user,
course_id=self.course.id,
is_active=True,
mode="honor",
)
self.xqueue = XQueueCertInterface()
def test_add_cert_callback_url(self):
with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
mock_send.return_value = (0, None)
self.xqueue.add_cert(self.user, self.course.id)
# Verify that the task was sent to the queue with the correct callback URL
self.assertTrue(mock_send.called)
__, kwargs = mock_send.call_args_list[0]
actual_header = json.loads(kwargs['header'])
self.assertIn('https://edx.org/update_certificate?key=', actual_header['lms_callback_url'])
def test_no_create_action_in_queue_for_html_view_certs(self):
"""
Tests there is no certificate create message in the queue if generate_pdf is False
"""
with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
self.xqueue.add_cert(self.user, self.course.id, generate_pdf=False)
# Verify that add_cert method does not add message to queue
self.assertFalse(mock_send.called)
certificate = GeneratedCertificate.objects.get(user=self.user, course_id=self.course.id)
self.assertEqual(certificate.status, CertificateStatuses.downloadable)
self.assertIsNotNone(certificate.verify_uuid)
@attr('shard_1')
@override_settings(CERT_QUEUE='certificates')
class XQueueCertInterfaceExampleCertificateTest(TestCase):
"""Tests for the XQueue interface for certificate generation. """
COURSE_KEY = CourseLocator(org='test', course='test', run='test')
TEMPLATE = 'test.pdf'
DESCRIPTION = 'test'
ERROR_MSG = 'Kaboom!'
def setUp(self):
super(XQueueCertInterfaceExampleCertificateTest, self).setUp()
self.xqueue = XQueueCertInterface()
def test_add_example_cert(self):
cert = self._create_example_cert()
with self._mock_xqueue() as mock_send:
self.xqueue.add_example_cert(cert)
# Verify that the correct payload was sent to the XQueue
self._assert_queue_task(mock_send, cert)
# Verify the certificate status
self.assertEqual(cert.status, ExampleCertificate.STATUS_STARTED)
def test_add_example_cert_error(self):
cert = self._create_example_cert()
with self._mock_xqueue(success=False):
self.xqueue.add_example_cert(cert)
# Verify the error status of the certificate
self.assertEqual(cert.status, ExampleCertificate.STATUS_ERROR)
self.assertIn(self.ERROR_MSG, cert.error_reason)
def _create_example_cert(self):
"""Create an example certificate. """
cert_set = ExampleCertificateSet.objects.create(course_key=self.COURSE_KEY)
return ExampleCertificate.objects.create(
example_cert_set=cert_set,
description=self.DESCRIPTION,
template=self.TEMPLATE
)
@contextmanager
def _mock_xqueue(self, success=True):
"""Mock the XQueue method for sending a task to the queue. """
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
mock_send.return_value = (0, None) if success else (1, self.ERROR_MSG)
yield mock_send
def _assert_queue_task(self, mock_send, cert):
"""Check that the task was added to the queue. """
expected_header = {
'lms_key': cert.access_key,
'lms_callback_url': 'https://edx.org/update_example_certificate?key={key}'.format(key=cert.uuid),
'queue_name': 'certificates'
}
expected_body = {
'action': 'create',
'username': cert.uuid,
'name': u'John Doë',
'course_id': unicode(self.COURSE_KEY),
'template_pdf': 'test.pdf',
'example_certificate': True
}
self.assertTrue(mock_send.called)
__, kwargs = mock_send.call_args_list[0]
actual_header = json.loads(kwargs['header'])
actual_body = json.loads(kwargs['body'])
self.assertEqual(expected_header, actual_header)
self.assertEqual(expected_body, actual_body)
| agpl-3.0 |
linkedin/indextank-service | storefront/boto/s3/bucketlistresultset.py | 3 | 2380 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None):
"""
A generator function for listing keys in a bucket.
"""
more_results = True
k = None
while more_results:
rs = bucket.get_all_keys(prefix=prefix, marker=marker,
delimiter=delimiter, headers=headers)
for k in rs:
yield k
if k:
marker = k.name
        more_results = rs.is_truncated
class BucketListResultSet:
"""
A resultset for listing keys within a bucket. Uses the bucket_lister
generator function and implements the iterator interface. This
transparently handles the results paging from S3 so even if you have
many thousands of keys within the bucket you can iterate over all
keys in a reasonably efficient manner.
"""
def __init__(self, bucket=None, prefix='', delimiter='', marker='', headers=None):
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.marker = marker
self.headers = headers
def __iter__(self):
return bucket_lister(self.bucket, prefix=self.prefix,
delimiter=self.delimiter, marker=self.marker, headers=self.headers)
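    # Hedged usage sketch (bucket and credential names are placeholders; in
    # practice Bucket.list() typically builds this result set for you):
    #
    #     from boto.s3.connection import S3Connection
    #     bucket = S3Connection('access_key', 'secret_key').get_bucket('my-bucket')
    #     for key in BucketListResultSet(bucket, prefix='logs/'):
    #         print(key.name)   # paging through results happens transparently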
| apache-2.0 |
ingenieroariel/geonode | geonode/upload/forms.py | 1 | 8421 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
import files
import tempfile
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from geonode import geoserver, qgis_server
from geonode.layers.forms import JSONField
from geonode.upload.models import UploadFile
from geonode.geoserver.helpers import ogc_server_settings
from geonode.utils import check_ogc_backend
class UploadFileForm(forms.ModelForm):
class Meta:
model = UploadFile
fields = '__all__'
class LayerUploadForm(forms.Form):
base_file = forms.FileField()
dbf_file = forms.FileField(required=False)
shx_file = forms.FileField(required=False)
prj_file = forms.FileField(required=False)
xml_file = forms.FileField(required=False)
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
sld_file = forms.FileField(required=False)
if check_ogc_backend(qgis_server.BACKEND_PACKAGE):
qml_file = forms.FileField(required=False)
geogig = forms.BooleanField(required=False)
geogig_store = forms.CharField(required=False)
time = forms.BooleanField(required=False)
mosaic = forms.BooleanField(required=False)
append_to_mosaic_opts = forms.BooleanField(required=False)
append_to_mosaic_name = forms.CharField(required=False)
mosaic_time_regex = forms.CharField(required=False)
mosaic_time_value = forms.CharField(required=False)
time_presentation = forms.CharField(required=False)
time_presentation_res = forms.IntegerField(required=False)
time_presentation_default_value = forms.CharField(required=False)
time_presentation_reference_value = forms.CharField(required=False)
abstract = forms.CharField(required=False)
layer_title = forms.CharField(required=False)
permissions = JSONField()
metadata_uploaded_preserve = forms.BooleanField(required=False)
metadata_upload_form = forms.BooleanField(required=False)
style_upload_form = forms.BooleanField(required=False)
spatial_files = [
"base_file",
"dbf_file",
"shx_file",
"prj_file",
"xml_file",
]
# Adding style file based on the backend
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
spatial_files.append('sld_file')
if check_ogc_backend(qgis_server.BACKEND_PACKAGE):
spatial_files.append('qml_file')
spatial_files = tuple(spatial_files)
def clean(self):
requires_datastore = () if ogc_server_settings.DATASTORE else (
'.csv',
'.kml')
types = [t for t in files.types if t.code not in requires_datastore]
def supported_type(ext):
return any([t.matches(ext) for t in types])
cleaned = super(LayerUploadForm, self).clean()
base_name, base_ext = os.path.splitext(cleaned["base_file"].name)
if base_ext.lower() == '.zip':
# for now, no verification, but this could be unified
pass
elif not supported_type(base_ext.lower()[1:]):
supported = " , ".join([t.name for t in types])
raise forms.ValidationError(
"%s files are supported. You uploaded a %s file" %
(supported, base_ext))
if base_ext.lower() == ".shp":
dbf_file = cleaned["dbf_file"]
shx_file = cleaned["shx_file"]
if dbf_file is None or shx_file is None:
raise forms.ValidationError(
"When uploading Shapefiles, .SHX and .DBF files are also required.")
dbf_name, __ = os.path.splitext(dbf_file.name)
shx_name, __ = os.path.splitext(shx_file.name)
if dbf_name != base_name or shx_name != base_name:
raise forms.ValidationError(
"It looks like you're uploading "
"components from different Shapefiles. Please "
"double-check your file selections.")
if cleaned["prj_file"] is not None:
prj_file = cleaned["prj_file"].name
if os.path.splitext(prj_file)[0] != base_name:
raise forms.ValidationError(
"It looks like you're "
"uploading components from different Shapefiles. "
"Please double-check your file selections.")
return cleaned
def write_files(self):
tempdir = tempfile.mkdtemp(dir=settings.FILE_UPLOAD_TEMP_DIR)
for field in self.spatial_files:
f = self.cleaned_data[field]
if f is not None:
path = os.path.join(tempdir, f.name)
with open(path, 'wb') as writable:
for c in f.chunks():
writable.write(c)
absolute_base_file = os.path.join(tempdir,
self.cleaned_data["base_file"].name)
return tempdir, absolute_base_file
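# Illustrative usage sketch (hypothetical Django view code, not part of this module):
# the form is bound to the upload request and, once valid, writes its files to a
# temporary directory for the importer to pick up:
#   form = LayerUploadForm(request.POST, request.FILES)
#   if form.is_valid():
#       tempdir, base_file = form.write_files()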
class TimeForm(forms.Form):
presentation_strategy = forms.CharField(required=False)
precision_value = forms.IntegerField(required=False)
precision_step = forms.ChoiceField(required=False, choices=[
('years',) * 2,
('months',) * 2,
('days',) * 2,
('hours',) * 2,
('minutes',) * 2,
('seconds',) * 2
])
def __init__(self, *args, **kwargs):
# have to remove these from kwargs or Form gets mad
time_names = kwargs.pop('time_names', None)
text_names = kwargs.pop('text_names', None)
year_names = kwargs.pop('year_names', None)
super(TimeForm, self).__init__(*args, **kwargs)
self._build_choice('time_attribute', time_names)
self._build_choice('end_time_attribute', time_names)
self._build_choice('text_attribute', text_names)
self._build_choice('end_text_attribute', text_names)
widget = forms.TextInput(attrs={'placeholder': 'Custom Format'})
if text_names:
self.fields['text_attribute_format'] = forms.CharField(
required=False, widget=widget)
self.fields['end_text_attribute_format'] = forms.CharField(
required=False, widget=widget)
self._build_choice('year_attribute', year_names)
self._build_choice('end_year_attribute', year_names)
def _resolve_attribute_and_type(self, *name_and_types):
return [(self.cleaned_data[n], t) for n, t in name_and_types
if self.cleaned_data.get(n, None)]
def _build_choice(self, att, names):
if names:
names.sort()
choices = [('', '<None>')] + [(a, a) for a in names]
self.fields[att] = forms.ChoiceField(
choices=choices, required=False)
def clean(self):
starts = self._resolve_attribute_and_type(
('time_attribute', 'Date'),
('text_attribute', 'Text'),
('year_attribute', 'Number'),
)
if len(starts) > 1:
raise ValidationError('multiple start attributes')
ends = self._resolve_attribute_and_type(
('end_time_attribute', 'Date'),
('end_text_attribute', 'Text'),
('end_year_attribute', 'Number'),
)
if len(ends) > 1:
raise ValidationError('multiple end attributes')
if len(starts) > 0:
self.cleaned_data['start_attribute'] = starts[0]
if len(ends) > 0:
self.cleaned_data['end_attribute'] = ends[0]
return self.cleaned_data
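# Illustrative sketch (assumed field values): with time_attribute='date' and no other
# start fields set, clean() stores cleaned_data['start_attribute'] == ('date', 'Date');
# selecting more than one start (or end) attribute raises a ValidationError instead.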
# @todo implement clean
class SRSForm(forms.Form):
srs = forms.CharField(required=True)
| gpl-3.0 |
stanxii/wen9000 | node.js/node_modules/npm/node_modules/node-gyp/legacy/tools/gyp/pylib/gyp/generator/msvs.py | 41 | 109235 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSVersion as MSVSVersion
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target, causing hard-to-track-down errors. 
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)/obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
# TODO(jeanluc) The way we currently generate libraries makes Visual
# Studio 2010 unhappy. We get a lot of warnings like:
# warning MSB8012: TargetPath(...\Debug\gles2_c_lib.lib) does not match
# the Library's OutputFile property value (...\Debug\lib\gles2_c_lib.lib).
# This may cause your project to build incorrectly. To correct this,
# please make sure that $(OutDir), $(TargetName) and $(TargetExt) property
# values match the value specified in %(Lib.OutputFile).
    # Despite the warnings, this compiles correctly. It would be nice to get rid
# of the warnings.
# TODO(jeanluc) I had: 'LIB_DIR': '$(OutDir)lib',
'LIB_DIR': '$(OutDir)/lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_DIRNAME': '$(InputDir)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
'msvs_shard',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
    source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if path and path[-1] == '\\':
path = path[:-1]
return path
def _FixPaths(paths):
"""Fix each of the paths of the list."""
return [_FixPath(i) for i in paths]
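# Minimal sketch of _FixPath (derived from the code above; assumes the module-level
# fixpath_prefix is unset). Results are shown as Python string literals:
#   _FixPath('../foo/bar.cc')   -> '..\\foo\\bar.cc'
#   _FixPath('$(OutDir)/lib/')  -> '$(OutDir)\\lib'    (trailing slash dropped)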
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
                                     list_excluded=True):
  """Converts a list of split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
excluded: A set of excluded files.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = dict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
else:
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
# Add a folder for excluded files.
if excluded_result and list_excluded:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded,
list_excluded=list_excluded)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name)-1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
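# Minimal sketch (derived from the helpers above): a configuration 'Debug_x64' whose
# config dict sets 'msvs_configuration_platform': 'x64' is reported as
#   _ConfigFullName('Debug_x64', config_data)  ->  'Debug|x64'
# while a configuration without an explicit platform falls back to 'Win32'.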
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd):
if [x for x in cmd if '$(InputDir)' in x]:
input_dir_preamble = (
'set INPUTDIR=$(InputDir)\n'
'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n'
'set INPUTDIR=%INPUTDIR:~0,-1%\n'
)
else:
input_dir_preamble = ''
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(InputDir)',
'`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['"%s"' % i for i in direct_cmd]
direct_cmd = [i.replace('"', '\\"') for i in direct_cmd]
#direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = (
'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
'set CYGWIN=nontsec&& ')
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += 'bash -c "%(cmd)s"'
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return input_dir_preamble + cmd
else:
# Convert cat --> type to mimic unix.
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
# Fix the paths
# If the argument starts with a slash, it's probably a command line switch
arguments = [i.startswith('/') and i or _FixPath(i) for i in cmd[1:]]
arguments = [i.replace('$(InputDir)','%INPUTDIR%') for i in arguments]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return input_dir_preamble + ' '.join(command + arguments)
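# Minimal sketch of the non-cygwin branch (derived from the code above; assumes
# quote_cmd is true, fixpath_prefix is unset and the command has no $(InputDir);
# spec is unused on this branch):
#   _BuildCommandLineForRuleRaw(spec, ['cat', 'foo/bar.txt'], False, False, True)
#     -> 'type "foo\\bar.txt"'
# i.e. 'cat' is mapped to 'type', paths are converted to backslashes and quoted.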
def _BuildCommandLineForRule(spec, rule, has_input_path):
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = _FixPaths(inputs)
outputs = _FixPaths(outputs)
tool = MSVSProject.Tool(
'VCCustomBuildTool',
{'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for primary_input in actions_dict:
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions_dict[primary_input]:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=primary_input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
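# Minimal sketch (substitutions follow directly from the code above) for the
# input file 'foo/bar.idl':
#   _RuleExpandPath('$(InputName)_h.cc', 'foo/bar.idl')                -> 'bar_h.cc'
#   _RuleExpandPath('$(InputDir)/gen/$(InputFileName)', 'foo/bar.idl') -> 'foo/gen/bar.idl'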
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
rule_ext = rule['extension']
return [s for s in sources if s.endswith('.' + rule_ext)]
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question.
trigger_file: the main trigger for this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = _FixPaths(rule.get('inputs', []))
raw_outputs = _FixPaths(rule.get('outputs', []))
inputs = set()
outputs = set()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = _FixPaths(r.get('inputs', []))
outputs = _FixPaths(r.get('outputs', []))
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.WriteIfChanged()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
actions_to_add: The list of actions we will add to.
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = set()
all_outputs = set()
all_output_dirs = set()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(set(inputs))
all_outputs.update(set(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
if od:
mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
mk_file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
mk_file.write('\t%s\n\n' % cmd)
# Close up the file.
mk_file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True)
# Insert makefile as 0'th input, so it gets the action attached there,
  # as this is easier to understand from within the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=_FixPaths(all_inputs),
outputs=_FixPaths(all_outputs),
description='Running %s' % cmd,
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
  The escaping is designed so that the Win32 CommandLineToArgv function will turn
  the escaped result back into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
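# Minimal sketch (raw characters of the result shown, derived from the regex above):
#   _EscapeCommandLineArgumentForMSVS('say "hi"')  ->  "say \"hi\""
# Each embedded quote gets a preceding backslash and the whole argument is wrapped
# in unescaped quotes, so CommandLineToArgv recovers the original string.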
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
segments = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(segments), 2):
segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
# Concatenate back into a single string
s = '"'.join(segments)
if len(segments) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
    # the item to the end of the list does work, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
return s
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1))/2*4)*'\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
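# Minimal sketch (follows directly from the escape table above):
#   _EscapeMSBuildSpecialCharacters('$(Var);50%')  ->  '%24(Var)%3B50%25'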
def _EscapeCppDefineForMSBuild(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSBuild(s)
s = _EscapeMSBuildSpecialCharacters(s)
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
p: the project
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
def _AdjustSourcesForRules(rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
    # Only add outputs when this rule processes its outputs as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = set(_FixPaths(inputs))
outputs = set(_FixPaths(outputs))
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = set(_FixPaths(actions_to_add.keys()))
return [s for s in excluded_sources if s not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
Returns:
the guid.
Raises:
ValueError: if the specified GUID is invalid.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GenerateProject(project, options, version, generator_flags):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
generator_flags: dict of generator-specific flags.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return
if version.UsesVcxproj():
_GenerateMSBuildProject(project, options, version, generator_flags)
else:
_GenerateMSVSProject(project, options, version, generator_flags)
def _GenerateMSVSProject(project, options, version, generator_flags):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
generator_flags: dict of generator-specific flags.
"""
spec = project.spec
vcproj_dir = os.path.dirname(project.path)
if vcproj_dir and not os.path.exists(vcproj_dir):
os.makedirs(vcproj_dir)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version, spec['target_name'],
project.guid, platforms)
# Get directory project file is in.
project_dir = os.path.split(project.path)[0]
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# Prepare list of sources and excluded sources.
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, project_dir, options, spec,
sources, excluded_sources,
actions_to_add)
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(
spec, options, project_dir, sources, excluded_sources, list_excluded))
# Add in files.
_VerifySourcesExist(sources, project_dir)
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompiledHeaders(p, sources, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.WriteIfChanged()
def _GetUniquePlatforms(spec):
  """Returns the list of unique platforms for this spec, e.g. ['Win32', ...].
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
    The list of unique platform names.
"""
# Gather list of unique platforms.
platforms = set()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version,
spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise Exception('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise Exception('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Adds a configuration to the MSVS project.
  Many settings in a vcproj file are specific to a configuration. This
  function adds the main part of the vcproj file that's configuration specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
    config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(spec)
out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)\\$(ProjectName)\\vc80.pdb', only_if_unset=True)
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
    config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = _FixPaths(include_dirs)
resource_include_dirs = _FixPaths(resource_include_dirs)
return include_dirs, resource_include_dirs
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, leaving only the last duplicate, while
# preserving order.
found = set()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub('^\-l', '', entry)
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
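# Minimal sketch (hypothetical spec): a leading '-l' is stripped and duplicates keep
# only their last occurrence while the remaining order is preserved:
#   _GetLibraries({'libraries': ['-lkernel32.lib', 'user32.lib', 'kernel32.lib']})
#     -> ['user32.lib', 'kernel32.lib']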
def _GetOutputFilePathAndTool(spec):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.dll'),
# TODO(jeanluc) If we want to avoid the MSB8012 warnings in
# VisualStudio 2010, we will have to change the value of $(OutDir)
# to contain the \lib suffix, rather than doing it as below.
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)\\lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
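# Minimal sketch (hypothetical spec with default product settings): an 'executable'
# target resolves to the linker and the standard output path:
#   _GetOutputFilePathAndTool({'type': 'executable'})
#     -> ('$(OutDir)\\$(ProjectName).exe', 'VCLinkerTool', 'Link')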
def _GetDefines(config):
  """Returns the list of preprocessor definitions for this configuration.
Arguments:
    config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
"""Convert tools to a form expected by Visual Studio.
Arguments:
    tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
    tools: A dictionary of settings; the tool name is the key.
    config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = _FixPaths(vsprops_dirs)
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
output_dir = prepared_attrs.get('OutputDirectory',
'$(SolutionDir)$(ConfigurationName)')
# TODO(jeanluc) If we want to avoid the MSB8012 warning, we should
# add code like the following to place libraries in their own directory.
# if config_type == '4':
# output_dir = spec.get('product_dir', output_dir + '\\lib')
prepared_attrs['OutputDirectory'] = output_dir
if 'IntermediateDirectory' not in prepared_attrs:
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources = [_NormalizedSource(s) for s in sources_array]
sources_set.update(set(sources))
def _PrepareListOfSources(spec, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
spec: The target dictionary containing the properties of the target.
gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
"""
sources = set()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = set()
# Add in the gyp file.
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a.get('inputs', [])
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = set(inputs)
sources.update(inputs)
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources, list_excluded):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
    gyp_dir: The directory that contains the gyp file being processed.
sources: A set of sources to be included for this project.
excluded_sources: A set of sources to be excluded for this project.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(set(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = _FixPaths(sources)
# Convert to proper windows form.
excluded_sources = _FixPaths(excluded_sources)
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
list_excluded=list_excluded)
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for _, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
if (not list_excluded and
len(excluded_configs) == len(spec['configurations'])):
# If we're not listing excluded files, then they won't appear in the
# project, so don't try to configure them to be excluded.
pass
else:
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = set()
for _, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompiledHeaders(p, sources, spec):
# Pre-compiled header source stubs need a different compiler flag
# (generate precompiled header) and any source file not of the same
# kind (i.e. C vs. C++) as the precompiled header source stub needs
# to have use of precompiled headers disabled.
extensions_excluded_from_precompile = []
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
# UsePrecompiledHeader=1 for if using precompiled headers.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
basename, extension = os.path.splitext(source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
def DisableForSourceTree(source_tree):
for source in source_tree:
if isinstance(source, MSVSProject.Filter):
DisableForSourceTree(source.contents)
else:
basename, extension = os.path.splitext(source)
if extension in extensions_excluded_from_precompile:
for config_name, config in spec['configurations'].iteritems():
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '0',
'ForcedIncludeFiles': '$(NOINHERIT)'})
p.AddFileConfig(_FixPath(source),
_ConfigFullName(config_name, config),
{}, tools=[tool])
# Do nothing if there was no precompiled source.
if extensions_excluded_from_precompile:
DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
# Add actions.
actions = spec.get('actions', [])
for a in actions:
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False)
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
  # Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
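# Minimal sketch (P stands for a hypothetical project object): a folder whose only
# entry is the identically named project is hoisted up one level:
#   _CollapseSingles('', {'base': {'base.vcproj': P}})  ->  {'base': P}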
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.split(build_file)[0], proj_filename)
fix_prefix = None
if options.generator_output:
project_dir_path = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fix_prefix = gyp.common.RelativePath(project_dir_path,
os.path.dirname(proj_path))
return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
A set of created projects, keyed by target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
_FixPath(proj_path),
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
projects[qualified_target] = obj
# Set all the dependencies
for project in projects.values():
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%d' % (parts[0], number)
return '#'.join(parts)
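# Added illustrative note (not part of the original gyp source): based on the
# docstring and the rsplit('#', 1) logic above, the transformation is assumed to
# behave like this:
#   _ShardName('foo#target', 1)                 -> 'foo_1#target'
#   _ShardName('base/base.gyp:base#target', 0)  -> 'base/base.gyp:base_0#target'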
def _ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t]['dependencies'] = new_dependencies
return (new_target_list, new_target_dicts)
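# Added illustrative sketch (hypothetical target names, not from the original
# source): a target 'foo#target' declared with 'msvs_shard': 2 and sources
# [a.cc, b.cc, c.cc, d.cc] is assumed to be split into 'foo_0#target' with
# sources [a.cc, c.cc] and 'foo_1#target' with [b.cc, d.cc] (round-robin), and
# any dependency on 'foo#target' is rewritten to depend on both shards.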
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
"""
global fixpath_prefix
options = params['options']
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
generator_flags = params.get('generator_flags', {})
# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
(target_list, target_dicts) = _ShardTargets(target_list, target_dicts)
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
_GenerateProject(project, options, msvs_version, generator_flags)
fixpath_prefix = None
for build_file in data:
# Validate build_file extension
if build_file[-4:] != '.gyp':
continue
sln_path = build_file[:-4] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
def _GenerateMSBuildFiltersFile(filters_path, source_files,
extension_to_rule_name):
"""Generate the filters file.
This file is used by Visual Studio to organize the presentation of source
files into folders.
Arguments:
filters_path: The path of the file to be created.
source_files: The hierarchical structure of all the sources.
extension_to_rule_name: A dictionary mapping file extensions to rules.
"""
filter_group = []
source_group = []
_AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
filter_group, source_group)
if filter_group:
content = ['Project',
{'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
},
['ItemGroup'] + filter_group,
['ItemGroup'] + source_group
]
easy_xml.WriteXmlIfChanged(content, filters_path)
elif os.path.exists(filters_path):
# We don't need this filter anymore. Delete the old filter file.
os.unlink(filters_path)
def _AppendFiltersForMSBuild(parent_filter_name, sources,
extension_to_rule_name,
filter_group, source_group):
"""Creates the list of filters and sources to be added in the filter file.
Args:
parent_filter_name: The name of the filter under which the sources are
found.
sources: The hierarchy of filters and sources to process.
extension_to_rule_name: A dictionary mapping file extensions to rules.
filter_group: The list to which filter entries will be appended.
source_group: The list to which source entries will be appended.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
# We have a sub-filter. Create the name of that sub-filter.
if not parent_filter_name:
filter_name = source.name
else:
filter_name = '%s\\%s' % (parent_filter_name, source.name)
# Add the filter to the group.
filter_group.append(
['Filter', {'Include': filter_name},
['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
# Recurse and add its dependents.
_AppendFiltersForMSBuild(filter_name, source.contents,
extension_to_rule_name,
filter_group, source_group)
else:
# It's a source. Create a source entry.
_, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name)
source_entry = [element, {'Include': source}]
# Specify the filter it is part of, if any.
if parent_filter_name:
source_entry.append(['Filter', parent_filter_name])
source_group.append(source_entry)
def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
else:
group = 'none'
element = 'None'
return (group, element)
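# Added hedged examples, derived from the branches above (not present in the
# original file); an empty extension_to_rule_name dict is assumed:
#   _MapFileToMsBuildSourceType('foo.cpp', {})   -> ('compile', 'ClCompile')
#   _MapFileToMsBuildSourceType('app.rc', {})    -> ('resource', 'ResourceCompile')
#   _MapFileToMsBuildSourceType('api.idl', {})   -> ('midl', 'Midl')
#   _MapFileToMsBuildSourceType('notes.txt', {}) -> ('none', 'None')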
def _GenerateRulesForMSBuild(output_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name):
# MSBuild rules are implemented using three files: an XML file, a .targets
# file and a .props file.
# See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
# for more details.
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
msbuild_rules = []
for rule in rules_native:
msbuild_rule = MSBuildRule(rule, spec)
msbuild_rules.append(msbuild_rule)
extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
if msbuild_rules:
base = spec['target_name'] + options.suffix
props_name = base + '.props'
targets_name = base + '.targets'
xml_name = base + '.xml'
props_files_of_rules.add(props_name)
targets_files_of_rules.add(targets_name)
props_path = os.path.join(output_dir, props_name)
targets_path = os.path.join(output_dir, targets_name)
xml_path = os.path.join(output_dir, xml_name)
_GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
_GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
_GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
class MSBuildRule(object):
"""Used to store information used to generate an MSBuild rule.
Attributes:
rule_name: The rule name, sanitized to use in XML.
target_name: The name of the target.
after_targets: The name of the AfterTargets element.
before_targets: The name of the BeforeTargets element.
depends_on: The name of the DependsOn element.
compute_output: The name of the ComputeOutput element.
dirs_to_make: The name of the DirsToMake element.
tlog: The name of the _tlog element.
extension: The extension this rule applies to.
description: The message displayed when this rule is invoked.
additional_dependencies: A string listing additional dependencies.
outputs: The outputs of this rule.
command: The command used to run the rule.
"""
def __init__(self, rule, spec):
self.display_name = rule['rule_name']
# Ensure that the rule name contains only word characters (letters, digits, underscores)
self.rule_name = re.sub(r'\W', '_', self.display_name)
# Create the various element names, following the example set by the
# Visual Studio 2008 to 2010 conversion. I don't know if VS2010
# is sensitive to the exact names.
self.target_name = '_' + self.rule_name
self.after_targets = self.rule_name + 'AfterTargets'
self.before_targets = self.rule_name + 'BeforeTargets'
self.depends_on = self.rule_name + 'DependsOn'
self.compute_output = 'Compute%sOutput' % self.rule_name
self.dirs_to_make = self.rule_name + 'DirsToMake'
self.tlog = self.rule_name + '_tlog'
self.extension = rule['extension']
if not self.extension.startswith('.'):
self.extension = '.' + self.extension
self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
rule.get('message', self.rule_name))
old_additional_dependencies = _FixPaths(rule.get('inputs', []))
self.additional_dependencies = (
';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_additional_dependencies]))
old_outputs = _FixPaths(rule.get('outputs', []))
self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_outputs])
old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True)
self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
"""Generate the .props file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
for rule in msbuild_rules:
content.extend([
['PropertyGroup',
{'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
"'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
rule.after_targets)
},
[rule.before_targets, 'Midl'],
[rule.after_targets, 'CustomBuild'],
],
['PropertyGroup',
[rule.depends_on,
{'Condition': "'$(ConfigurationType)' != 'Makefile'"},
'_SelectedFiles;$(%s)' % rule.depends_on
],
],
['ItemDefinitionGroup',
[rule.rule_name,
['CommandLineTemplate', rule.command],
['Outputs', rule.outputs],
['ExecutionDescription', rule.description],
['AdditionalDependencies', rule.additional_dependencies],
],
]
])
easy_xml.WriteXmlIfChanged(content, props_path)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
"""Generate the .targets file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
}
]
item_group = [
'ItemGroup',
['PropertyPageSchema',
{'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
]
]
for rule in msbuild_rules:
item_group.append(
['AvailableItemName',
{'Include': rule.rule_name},
['Targets', rule.target_name],
])
content.append(item_group)
for rule in msbuild_rules:
content.append(
['UsingTask',
{'TaskName': rule.rule_name,
'TaskFactory': 'XamlTaskFactory',
'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
},
['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
])
for rule in msbuild_rules:
rule_name = rule.rule_name
target_outputs = '%%(%s.Outputs)' % rule_name
target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
'$(MSBuildProjectFile)') % (rule_name, rule_name)
rule_inputs = '%%(%s.Identity)' % rule_name
extension_condition = ("'%(Extension)'=='.obj' or "
"'%(Extension)'=='.res' or "
"'%(Extension)'=='.rsc' or "
"'%(Extension)'=='.lib'")
remove_section = [
'ItemGroup',
{'Condition': "'@(SelectedFiles)' != ''"},
[rule_name,
{'Remove': '@(%s)' % rule_name,
'Condition': "'%(Identity)' != '@(SelectedFiles)'"
}
]
]
logging_section = [
'ItemGroup',
[rule.tlog,
{'Include': '%%(%s.Outputs)' % rule_name,
'Condition': ("'%%(%s.Outputs)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" %
(rule_name, rule_name))
},
['Source', "@(%s, '|')" % rule_name],
],
]
message_section = [
'Message',
{'Importance': 'High',
'Text': '%%(%s.ExecutionDescription)' % rule_name
}
]
write_lines_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).write.1.tlog',
'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
rule.tlog)
}
]
command_and_input_section = [
rule_name,
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule_name, rule_name),
'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
'Inputs': rule_inputs
}
]
content.extend([
['Target',
{'Name': rule.target_name,
'BeforeTargets': '$(%s)' % rule.before_targets,
'AfterTargets': '$(%s)' % rule.after_targets,
'Condition': "'@(%s)' != ''" % rule_name,
'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
rule.compute_output),
'Outputs': target_outputs,
'Inputs': target_inputs
},
remove_section,
logging_section,
message_section,
write_lines_section,
command_and_input_section,
],
['PropertyGroup',
['ComputeLinkInputsTargets',
'$(ComputeLinkInputsTargets);',
'%s;' % rule.compute_output
],
['ComputeLibInputsTargets',
'$(ComputeLibInputsTargets);',
'%s;' % rule.compute_output
],
],
['Target',
{'Name': rule.compute_output,
'Condition': "'@(%s)' != ''" % rule_name
},
['ItemGroup',
[rule.dirs_to_make,
{'Condition': "'@(%s)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
'Include': '%%(%s.Outputs)' % rule_name
}
],
['Link',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['Lib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['ImpLib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
],
['MakeDir',
{'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
rule.dirs_to_make)
}
]
],
])
easy_xml.WriteXmlIfChanged(content, targets_path)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
# Generate the .xml file
content = [
'ProjectSchemaDefinitions',
{'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
'assembly=Microsoft.Build.Framework'),
'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
'xmlns:transformCallback':
'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
}
]
for rule in msbuild_rules:
content.extend([
['Rule',
{'Name': rule.rule_name,
'PageTemplate': 'tool',
'DisplayName': rule.display_name,
'Order': '200'
},
['Rule.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name
}
]
],
['Rule.Categories',
['Category',
{'Name': 'General'},
['Category.DisplayName',
['sys:String', 'General'],
],
],
['Category',
{'Name': 'Command Line',
'Subtype': 'CommandLine'
},
['Category.DisplayName',
['sys:String', 'Command Line'],
],
],
],
['StringListProperty',
{'Name': 'Inputs',
'Category': 'Command Line',
'IsRequired': 'true',
'Switch': ' '
},
['StringListProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name,
'SourceType': 'Item'
}
]
],
],
['StringProperty',
{'Name': 'CommandLineTemplate',
'DisplayName': 'Command Line',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['DynamicEnumProperty',
{'Name': rule.before_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute Before'],
],
['DynamicEnumProperty.Description',
['sys:String', 'Specifies the targets for the build customization'
' to run before.'
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.before_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'HasConfigurationCondition': 'true'
}
]
],
],
['DynamicEnumProperty',
{'Name': rule.after_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute After'],
],
['DynamicEnumProperty.Description',
['sys:String', ('Specifies the targets for the build customization'
' to run after.')
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.after_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': '',
'HasConfigurationCondition': 'true'
}
]
],
],
['StringListProperty',
{'Name': 'Outputs',
'DisplayName': 'Outputs',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringProperty',
{'Name': 'ExecutionDescription',
'DisplayName': 'Execution Description',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringListProperty',
{'Name': 'AdditionalDependencies',
'DisplayName': 'Additional Dependencies',
'IncludeInCommandLine': 'False',
'Visible': 'false'
}
],
['StringProperty',
{'Subtype': 'AdditionalOptions',
'Name': 'AdditionalOptions',
'Category': 'Command Line'
},
['StringProperty.DisplayName',
['sys:String', 'Additional Options'],
],
['StringProperty.Description',
['sys:String', 'Additional Options'],
],
],
],
['ItemType',
{'Name': rule.rule_name,
'DisplayName': rule.display_name
}
],
['FileExtension',
{'Name': '*' + rule.extension,
'ContentType': rule.rule_name
}
],
['ContentType',
{'Name': rule.rule_name,
'DisplayName': '',
'ItemType': rule.rule_name
}
]
])
easy_xml.WriteXmlIfChanged(content, xml_path)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
_GetConfigurationAndPlatform(name, settings))
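# Added hedged example (hypothetical configuration name): for a gyp configuration
# named 'Debug_x64' whose settings contain 'msvs_configuration_platform': 'x64',
# _GetConfigurationAndPlatform is assumed to return ('Debug', 'x64'), so the
# condition string becomes "'$(Configuration)|$(Platform)'=='Debug|x64'".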
def _GetMSBuildProjectConfigurations(configurations):
group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
for (name, settings) in sorted(configurations.iteritems()):
configuration, platform = _GetConfigurationAndPlatform(name, settings)
designation = '%s|%s' % (configuration, platform)
group.append(
['ProjectConfiguration', {'Include': designation},
['Configuration', configuration],
['Platform', platform]])
return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
target_name = prefix + product_name
namespace = os.path.splitext(gyp_file_name)[0]
return [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
['TargetName', target_name],
]
]
def _GetMSBuildConfigurationDetails(spec, build_file):
properties = {}
for name, settings in spec['configurations'].iteritems():
msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
condition = _GetConfigurationCondition(name, settings)
character_set = msbuild_attributes.get('CharacterSet')
_AddConditionalProperty(properties, condition, 'ConfigurationType',
msbuild_attributes['ConfigurationType'])
if character_set:
_AddConditionalProperty(properties, condition, 'CharacterSet',
character_set)
return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildPropertySheets(configurations):
user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
return [
['ImportGroup',
{'Label': 'PropertySheets'},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
]
def _GetMSBuildAttributes(spec, config, build_file):
# Use the MSVS attributes and convert them. In the future, we may want to
# support Gyp files specifying 'msbuild_configuration_attributes' directly.
config_type = _GetMSVSConfigurationType(spec, build_file)
msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
msbuild_attributes = {}
for a in msvs_attributes:
if a in ['IntermediateDirectory', 'OutputDirectory']:
directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
if not directory.endswith('\\'):
directory += '\\'
msbuild_attributes[a] = directory
elif a == 'CharacterSet':
msbuild_attributes[a] = {
'0': 'MultiByte',
'1': 'Unicode'
}[msvs_attributes[a]]
elif a == 'ConfigurationType':
msbuild_attributes[a] = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[msvs_attributes[a]]
else:
print 'Warning: Do not know how to convert MSVS attribute ' + a
return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
# TODO(jeanluc) We could optimize out the following and do it only if
# there are actions.
# TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
new_paths = []
cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
if cygwin_dirs:
cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
new_paths.append(cyg_path)
# TODO(jeanluc) Change the convention to have both a cygwin_dir and a
# python_dir.
python_path = cyg_path.replace('cygwin\\bin', 'python_26')
new_paths.append(python_path)
if new_paths:
new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
properties = {}
for (name, configuration) in sorted(configurations.iteritems()):
condition = _GetConfigurationCondition(name, configuration)
attributes = _GetMSBuildAttributes(spec, configuration, build_file)
msbuild_settings = configuration['finalized_msbuild_settings']
_AddConditionalProperty(properties, condition, 'IntDir',
attributes['IntermediateDirectory'])
_AddConditionalProperty(properties, condition, 'OutDir',
attributes['OutputDirectory'])
if new_paths:
_AddConditionalProperty(properties, condition, 'ExecutablePath',
new_paths)
tool_settings = msbuild_settings.get('', {})
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild('', name, value)
_AddConditionalProperty(properties, condition, name, formatted_value)
return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of conditions for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
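# Added illustrative comment (hypothetical values, not from the original source):
# after a few calls the 'properties' dictionary is assumed to look roughly like
#   {'IntDir': {'Debug\\obj\\': ["'$(Configuration)|$(Platform)'=='Debug|Win32'"],
#               'Release\\obj\\': ["'$(Configuration)|$(Platform)'=='Release|Win32'"]}}
# i.e. property name -> value -> list of conditions under which that value applies.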
def _GetMSBuildPropertyGroup(spec, label, properties):
"""Returns a PropertyGroup definition for the specified properties.
Arguments:
spec: The target project dict.
label: An optional label for the PropertyGroup.
properties: The dictionary to be converted. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of conditions for which this value is true.
"""
group = ['PropertyGroup']
if label:
group.append({'Label': label})
num_configurations = len(spec['configurations'])
for name, values in sorted(properties.iteritems()):
for value, conditions in sorted(values.iteritems()):
if len(conditions) == num_configurations:
# If the value is the same for all configurations,
# just add one unconditional entry.
group.append([name, value])
else:
for condition in conditions:
group.append([name, {'Condition': condition}, value])
return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
groups = []
for (name, configuration) in sorted(configurations.iteritems()):
msbuild_settings = configuration['finalized_msbuild_settings']
group = ['ItemDefinitionGroup',
{'Condition': _GetConfigurationCondition(name, configuration)}
]
for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
# Skip the tool named '' which is a holder of global settings handled
# by _GetMSBuildConfigurationGlobalProperties.
if tool_name:
if tool_settings:
tool = [tool_name]
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
value)
tool.append([name, formatted_value])
group.append(tool)
groups.append(group)
return groups
def _FinalizeMSBuildSettings(spec, configuration):
if 'msbuild_settings' in configuration:
converted = False
msbuild_settings = configuration['msbuild_settings']
MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
else:
converted = True
msvs_settings = configuration.get('msvs_settings', {})
msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
include_dirs, resource_include_dirs = _GetIncludeDirs(configuration)
libraries = _GetLibraries(spec)
out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec)
defines = _GetDefines(configuration)
if converted:
# Visual Studio 2010 has TR1
defines = [d for d in defines if d != '_HAS_TR1=0']
# Warn of ignored settings
ignored_settings = ['msvs_prebuild', 'msvs_postbuild', 'msvs_tool_files']
for ignored_setting in ignored_settings:
value = configuration.get(ignored_setting)
if value:
print ('Warning: The automatic conversion to MSBuild does not handle '
'%s. Ignoring setting of %s' % (ignored_setting, str(value)))
defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(configuration)
# TODO(jeanluc) Validate & warn that we don't translate
# prebuild = configuration.get('msvs_prebuild')
# postbuild = configuration.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = configuration.get('msvs_precompiled_header')
# Add the information to the appropriate tool
# TODO(jeanluc) We could optimize and generate these settings only if
# the corresponding files are found, e.g. don't generate ResourceCompile
# if you don't have any resources.
_ToolAppend(msbuild_settings, 'ClCompile',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(msbuild_settings, 'Link', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
only_if_unset=True)
# Add defines.
_ToolAppend(msbuild_settings, 'ClCompile',
'PreprocessorDefinitions', defines)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'PreprocessorDefinitions', defines)
# Add disabled warnings.
_ToolAppend(msbuild_settings, 'ClCompile',
'DisableSpecificWarnings', disabled_warnings)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
_ToolAppend(msbuild_settings, 'ClCompile',
'PrecompiledHeaderFile', precompiled_header)
_ToolAppend(msbuild_settings, 'ClCompile',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
configuration['finalized_msbuild_settings'] = msbuild_settings
def _GetValueFormattedForMSBuild(tool_name, name, value):
if type(value) == list:
# For some settings, VS2010 does not automatically extend the list with the
# inherited (default) values, so append the '%(Name)' macro explicitly.
# TODO(jeanluc) Is this what we want?
if name in ['AdditionalDependencies',
'AdditionalIncludeDirectories',
'AdditionalLibraryDirectories',
'AdditionalOptions',
'DelayLoadDLLs',
'DisableSpecificWarnings',
'PreprocessorDefinitions']:
value.append('%%(%s)' % name)
# For most tools, entries in a list should be separated with ';' but some
# settings use a space. Check for those first.
exceptions = {
'ClCompile': ['AdditionalOptions'],
'Link': ['AdditionalOptions'],
'Lib': ['AdditionalOptions']}
if tool_name in exceptions and name in exceptions[tool_name]:
char = ' '
else:
char = ';'
formatted_value = char.join(
[MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
else:
formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
return formatted_value
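# Added hedged example (hypothetical defines, not from the original source): a list
# value ['WIN32', 'NDEBUG'] for ClCompile's 'PreprocessorDefinitions' is assumed to
# be formatted as 'WIN32;NDEBUG;%(PreprocessorDefinitions)', while a list for
# ClCompile's 'AdditionalOptions' would be joined with spaces instead of ';'.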
def _VerifySourcesExist(sources, root_dir):
"""Verifies that all source files exist on disk.
Checks that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation but produce no
otherwise-visible errors.
Arguments:
sources: A recursive list of Filter/file names.
root_dir: The root directory for the relative path names.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
_VerifySourcesExist(source.contents, root_dir)
else:
if '$' not in source:
full_path = os.path.join(root_dir, source)
if not os.path.exists(full_path):
print 'Warning: Missing input file ' + full_path + ' pwd=' +\
os.getcwd()
def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name,
actions_spec, sources_handled_by_action, list_excluded):
groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule']
grouped_sources = {}
for g in groups:
grouped_sources[g] = []
_AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action, list_excluded)
sources = []
for g in groups:
if grouped_sources[g]:
sources.append(['ItemGroup'] + grouped_sources[g])
if actions_spec:
sources.append(['ItemGroup'] + actions_spec)
return sources
def _AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded):
extensions_excluded_from_precompile = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
_AddSources2(spec, source.contents, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded)
else:
if not source in sources_handled_by_action:
detail = []
excluded_configurations = exclusions.get(source, [])
if len(excluded_configurations) == len(spec['configurations']):
detail.append(['ExcludedFromBuild', 'true'])
else:
for config_name, configuration in sorted(excluded_configurations):
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['ExcludedFromBuild',
{'Condition': condition},
'true'])
# Add precompile if needed
for config_name, configuration in spec['configurations'].iteritems():
precompiled_source = configuration.get('msvs_precompiled_source', '')
if precompiled_source != '':
precompiled_source = _FixPath(precompiled_source)
if not extensions_excluded_from_precompile:
# If the precompiled header is generated by a C source, we must
# not try to use it for C++ sources, and vice versa.
basename, extension = os.path.splitext(precompiled_source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
if precompiled_source == source:
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['PrecompiledHeader',
{'Condition': condition},
'Create'
])
else:
# Turn off precompiled header usage for source files of a
# different type than the file that generated the
# precompiled header.
for extension in extensions_excluded_from_precompile:
if source.endswith(extension):
detail.append(['PrecompiledHeader', ''])
detail.append(['ForcedIncludeFiles', ''])
group, element = _MapFileToMsBuildSourceType(source,
extension_to_rule_name)
grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
references = []
if project.dependencies:
group = ['ItemGroup']
for dependency in project.dependencies:
guid = dependency.guid
project_dir = os.path.split(project.path)[0]
relative_path = gyp.common.RelativePath(dependency.path, project_dir)
project_ref = ['ProjectReference',
{'Include': relative_path},
['Project', guid],
['ReferenceOutputAssembly', 'false']
]
for config in dependency.spec.get('configurations', {}).itervalues():
# If it's disabled in any config, turn it off in the reference.
if config.get('msvs_2010_disable_uldi_when_referenced', 0):
project_ref.append(['UseLibraryDependencyInputs', 'false'])
break
group.append(project_ref)
references.append(group)
return references
def _GenerateMSBuildProject(project, options, version, generator_flags):
spec = project.spec
configurations = spec['configurations']
project_dir, project_file_name = os.path.split(project.path)
msbuildproj_dir = os.path.dirname(project.path)
if msbuildproj_dir and not os.path.exists(msbuildproj_dir):
os.makedirs(msbuildproj_dir)
# Prepare list of sources and excluded sources.
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, gyp_file)
# Add rules.
actions_to_add = {}
props_files_of_rules = set()
targets_files_of_rules = set()
extension_to_rule_name = {}
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
_GenerateRulesForMSBuild(project_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options,
project_dir, sources,
excluded_sources,
list_excluded))
_AddActions(actions_to_add, spec, project.build_file)
_AddCopies(actions_to_add, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
spec, actions_to_add)
_GenerateMSBuildFiltersFile(project.path + '.filters', sources,
extension_to_rule_name)
_VerifySourcesExist(sources, project_dir)
for (_, configuration) in configurations.iteritems():
_FinalizeMSBuildSettings(spec, configuration)
# Add attributes to root element
import_default_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
import_cpp_props_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
import_cpp_targets_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
content = [
'Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
'ToolsVersion': version.ProjectVersion(),
'DefaultTargets': 'Build'
}]
content += _GetMSBuildProjectConfigurations(configurations)
content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
content += import_default_section
content += _GetMSBuildConfigurationDetails(spec, project.build_file)
content += import_cpp_props_section
content += _GetMSBuildExtensions(props_files_of_rules)
content += _GetMSBuildPropertySheets(configurations)
content += macro_section
content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
project.build_file)
content += _GetMSBuildToolSettingsSections(spec, configurations)
content += _GetMSBuildSources(
spec, sources, exclusions, extension_to_rule_name, actions_spec,
sources_handled_by_action, list_excluded)
content += _GetMSBuildProjectReferences(project)
content += import_cpp_targets_section
content += _GetMSBuildExtensionTargets(targets_files_of_rules)
# TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
# has_run_as = _WriteMSVSUserFile(project.path, version, spec)
easy_xml.WriteXmlIfChanged(content, project.path)
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
"""Add actions accumulated into an actions_to_add, merging as needed.
Arguments:
spec: the target project dict
actions_to_add: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
Returns:
A pair of (action specification, the sources handled by this action).
"""
sources_handled_by_action = set()
actions_spec = []
for primary_input, actions in actions_to_add.iteritems():
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
cmd = action['command']
# For most actions, add 'call' so that actions that invoke batch files
# return and continue executing. msbuild_use_call provides a way to
# disable this but I have not seen any adverse effect from doing that
# for everything.
if action.get('msbuild_use_call', True):
cmd = 'call ' + cmd
commands.append(cmd)
# Add the custom build action for one input file.
description = ', and also '.join(descriptions)
command = ' && '.join(commands)
_AddMSBuildAction(spec,
primary_input,
inputs,
outputs,
command,
description,
sources_handled_by_action,
actions_spec)
return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
sources_handled_by_action, actions_spec):
command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
primary_input = _FixPath(primary_input)
inputs_array = _FixPaths(inputs)
outputs_array = _FixPaths(outputs)
additional_inputs = ';'.join([i for i in inputs_array
if i != primary_input])
outputs = ';'.join(outputs_array)
sources_handled_by_action.add(primary_input)
action_spec = ['CustomBuild', {'Include': primary_input}]
action_spec.extend(
# TODO(jeanluc) 'Document' for all or just if as_sources?
[['FileType', 'Document'],
['Command', command],
['Message', description],
['Outputs', outputs]
])
if additional_inputs:
action_spec.append(['AdditionalInputs', additional_inputs])
actions_spec.append(action_spec)
| lgpl-2.1 |
vancepym/ogre | Tools/Blender2.6Export/ogre_mesh_exporter/global_properties.py | 16 | 9771 | # ##### BEGIN MIT LICENSE BLOCK #####
# Copyright (C) 2011 by Lih-Hern Pang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ##### END MIT LICENSE BLOCK #####
import bpy, os, sys, configparser
from bpy.props import *
STATIC_CONFIG_FILENAME = "ogre_mesh_exporter.cfg"
class SelectedObject(bpy.types.PropertyGroup):
name = StringProperty(name = "Name", default = "Unknown", options = set())
objectName = StringProperty(name = "Object", default = "Unknown", options = set())
class SelectedObjectList(bpy.types.PropertyGroup):
def onSelectionChanged(self, context):
# Set the selected object as active.
bpy.context.scene.objects.active = bpy.data.objects[self.collection[self.collectionIndex].objectName]
collection = CollectionProperty(type = SelectedObject, options = set())
collectionIndex = IntProperty(min = -1, default = -1, options = set(), update=onSelectionChanged)
class GlobalProperties(bpy.types.PropertyGroup):
# ##############################################
# Material Properties
exportMaterials = BoolProperty(
name = "Export Materials",
description = "Enable/Disable material file exporting.",
default = True,
options = set()
)
materialFile = StringProperty(
name = "Material File",
description = "File name of material.",
default = "Scene.material",
options = set()
)
copyTextures = BoolProperty(
name = "Copy Textures",
description = "Copy textures to export path.",
default = False,
options = set()
)
materialExportMode = EnumProperty(
name = "Material Export Mode",
description = "Diffrent Material Export Modes.",
items = (("rend", "Rendering Materials", "Export using rendering materials."),
("game", "Game Engine Materials", "Export using game engine materials."),
("custom", "Custom Materials", "Export using custom template based materials."),
),
default = "rend",
options = set()
)
templatePath = StringProperty(
name = "Template Path",
description = "Path to material templates for generating custom materials.",
subtype = "DIR_PATH",
options = set()
)
# ##############################################
# Mesh Properties
exportMeshes = BoolProperty(
name = "Export Meshes",
description = "Enable/Disable mesh & skeleton file exporting.",
default = True,
options = set()
)
exportPath = StringProperty(
name = "Export Path",
description = "Path to export files.",
subtype = "DIR_PATH",
options = set()
)
fixUpAxisToY = BoolProperty(
name = "Fix Up Axis to Y",
description = "Fix up axis as Y instead of Z.",
default = True,
options = set()
)
requireMaterials = BoolProperty(
name = "Require Materials",
description = "Generate Error message when part of a mesh is not assigned with a material.",
default = True,
options = set()
)
applyModifiers = BoolProperty(
name = "Apply Modifiers",
description = "Apply mesh modifiers before export. (Slow and may break vertex order for morph targets!)",
default = False,
options = set()
)
skeletonNameFollowMesh = BoolProperty(
name = "Skeleton Name Follow Mesh",
description = "Use mesh name for exported skeleton name instead of the armature name.",
default = True,
options = set()
)
runOgreXMLConverter = BoolProperty(
name = "OgreXMLConverter",
description = "Run OgreXMLConverter on exported XML files.",
default = True,
options = set()
)
# ##############################################
# XML Converter Properties
# This is only a temporary property for editing, due to Blender's limitation on its dynamic properties.
# The true value is stored in the globally shared config file.
# This means that this value will be the same for all blend files opened.
ogreXMLConverterPath = StringProperty(
name = "Ogre XML Converter Path",
description = "Path to OgreXMLConverter.",
subtype = "FILE_PATH",
options = {'SKIP_SAVE'}
)
# Saved to the shared config file as above.
ogreXMLConverterAdditionalArg = StringProperty(
name = "Additional Arguments",
description = "Additional Arguments outside of the provided options below. Note that this is shared across all blend files.",
options = {'SKIP_SAVE'}
)
useXMLConverterOptions = BoolProperty(
name = "Use XML Converter Options",
description = "Use the settings set by this XML converter option. These options are saved in blend file. If you want a globally shared option, please uncheck this and use the 'Additional Arguments' option.",
default = True,
options = {'SKIP_SAVE'}
)
extremityPoints = IntProperty(
name = "Extremity Points",
description = "Generate no more than num eXtremes for every submesh. (For submesh render sorting when using alpha materials on submesh)",
soft_min = 0,
soft_max = 65536,
options = {'SKIP_SAVE'}
)
edgeLists = BoolProperty(
name = "Edge Lists",
description = "Generate edge lists. (Useful for outlining or doing stencil shadows)",
default = False,
options = {'SKIP_SAVE'}
)
tangent = BoolProperty(
name = "Tangent",
description = "Generate tangent.",
default = False,
options = {'SKIP_SAVE'}
)
tangentSemantic = EnumProperty(
name = "Tangent Semantic",
description = "Tangent Semantic to use.",
items=(("uvw", "uvw", "Use UV semantic."),
("tangent", "tangent", "Use tangent semantic."),
),
default= "tangent",
options = {'SKIP_SAVE'}
)
tangentSize = EnumProperty(
name = "Tangent Size",
description = "Size of tangent.",
items=(("4", "4 component (parity)", "Use 4 component tangent where 4th component is parity."),
("3", "3 component", "Use 3 component tangent."),
),
default= "3",
options = {'SKIP_SAVE'}
)
splitMirrored = BoolProperty(
name = "Split Mirrored",
description = "Split tangent vertices at UV mirror points.",
default = False,
options = {'SKIP_SAVE'}
)
splitRotated = BoolProperty(
name = "Split Rotated",
description = "Split tangent vertices where basis is rotated > 90 degrees.",
default = False,
options = {'SKIP_SAVE'}
)
reorganiseVertBuff = BoolProperty(
name = "Reorganise Vertex Buffers",
description = "Reorganise vertex buffer to make it GPU vertex cache friendly.",
default = True,
options = {'SKIP_SAVE'}
)
optimiseAnimation = BoolProperty(
name = "Optimise Animation",
description = "Optimise out redundant tracks & keyframes.",
default = True,
options = {'SKIP_SAVE'}
)
# ##############################################
# Log properties.
logPageSize = IntProperty(
name = "Log Page Size",
description = "Size of a visible log page",
default = 10,
options = {'SKIP_SAVE'}
)
logPercentage = IntProperty(
name = "Log Percentage",
description = "Log progress",
default = 100, min = 0, max = 100,
subtype = 'PERCENTAGE',
options = {'SKIP_SAVE'}
)
# ##############################################
# temporary collection for listing selected meshes.
selectedObjectList = PointerProperty(type = SelectedObjectList)
def onDummyTrueChanged(self, context):
# Never let Dummy change.
self.dummyTrue = True
def onDummyFalseChanged(self, context):
# Never let Dummy change.
self.dummyFalse = False
# Dummy property for tab use. (NEVER SET)
dummyTrue = BoolProperty(
default = True,
update = onDummyTrueChanged,
options = {'SKIP_SAVE'})
# Dummy property for label box use. (NEVER SET)
dummyFalse = BoolProperty(
default = False,
update = onDummyFalseChanged,
options = {'SKIP_SAVE'})
# Load static data from config file.
def loadStaticConfig():
global OGRE_XML_CONVERTERPATH
global_settings = bpy.context.scene.ogre_mesh_exporter
# load static data from config file.
config_path = bpy.utils.user_resource('CONFIG')
config_filepath = os.path.join(config_path, STATIC_CONFIG_FILENAME)
config = configparser.ConfigParser()
config.read(config_filepath)
if sys.platform.startswith('win'):
global_settings.ogreXMLConverterPath = _parseConfig(config, "PATHS", "OgreXMLConverter", "C:\\OgreCommandLineTools\\OgreXmlConverter.exe")
elif sys.platform.startswith('linux'):
global_settings.ogreXMLConverterPath = _parseConfig(config, "PATHS", "OgreXMLConverter", "/usr/bin/OgreXMLConverter")
# Parse static config data.
def _parseConfig(config, section, key, default):
try:
return config.get(section, key)
except configparser.Error:
return default
# Save static data to config file.
def saveStaticConfig():
global_settings = bpy.context.scene.ogre_mesh_exporter
config_path = bpy.utils.user_resource('CONFIG')
config_filepath = os.path.join(config_path, STATIC_CONFIG_FILENAME)
config = configparser.ConfigParser()
config.add_section("PATHS")
config.set("PATHS", "OgreXMLConverter", global_settings.ogreXMLConverterPath)
config.read(config_filepath)
with open(config_filepath, 'w') as configfile:
config.write(configfile)
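# Added illustrative sketch (hypothetical path; not part of the original exporter):
# after saveStaticConfig() the shared config file is assumed to contain roughly
#   [PATHS]
#   ogrexmlconverter = C:\OgreCommandLineTools\OgreXmlConverter.exe
# (configparser lower-cases option names by default when writing).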
| mit |
LittleLama/Sick-Beard-BoxCar2 | cherrypy/lib/static.py | 39 | 14178 | import logging
import mimetypes
mimetypes.init()
mimetypes.types_map['.dwg'] = 'image/x-dwg'
mimetypes.types_map['.ico'] = 'image/x-icon'
mimetypes.types_map['.bz2'] = 'application/x-bzip2'
mimetypes.types_map['.gz'] = 'application/x-gzip'
import os
import re
import stat
import time
from urllib import unquote
import cherrypy
from cherrypy.lib import cptools, httputil, file_generator_limited
def serve_file(path, content_type=None, disposition=None, name=None, debug=False):
"""Set status, headers, and body in order to serve the given path.
The Content-Type header will be set to the content_type arg, if provided.
If not provided, the Content-Type will be guessed by the file extension
of the 'path' argument.
If disposition is not None, the Content-Disposition header will be set
to "<disposition>; filename=<name>". If name is None, it will be set
to the basename of path. If disposition is None, no Content-Disposition
header will be written.
"""
response = cherrypy.serving.response
# If path is relative, users should fix it by making path absolute.
# That is, CherryPy should not guess where the application root is.
# It certainly should *not* use cwd (since CP may be invoked from a
# variety of paths). If using tools.staticdir, you can make your relative
# paths become absolute by supplying a value for "tools.staticdir.root".
if not os.path.isabs(path):
msg = "'%s' is not an absolute path." % path
if debug:
cherrypy.log(msg, 'TOOLS.STATICFILE')
raise ValueError(msg)
try:
st = os.stat(path)
except OSError:
if debug:
cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
raise cherrypy.NotFound()
# Check if path is a directory.
if stat.S_ISDIR(st.st_mode):
# Let the caller deal with it as they like.
if debug:
cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
raise cherrypy.NotFound()
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
cptools.validate_since()
if content_type is None:
# Set content-type based on filename extension
ext = ""
i = path.rfind('.')
if i != -1:
ext = path[i:].lower()
content_type = mimetypes.types_map.get(ext, None)
if content_type is not None:
response.headers['Content-Type'] = content_type
if debug:
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
cd = None
if disposition is not None:
if name is None:
name = os.path.basename(path)
cd = '%s; filename="%s"' % (disposition, name)
response.headers["Content-Disposition"] = cd
if debug:
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
content_length = st.st_size
fileobj = open(path, 'rb')
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
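# Added usage sketch (hypothetical path and handler; not part of CherryPy itself):
#
#   class Root(object):
#       def report(self):
#           return serve_file('/srv/reports/latest.pdf',
#                             content_type='application/pdf',
#                             disposition='attachment', name='latest.pdf')
#       report.exposed = True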
def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
debug=False):
"""Set status, headers, and body in order to serve the given file object.
The Content-Type header will be set to the content_type arg, if provided.
If disposition is not None, the Content-Disposition header will be set
to "<disposition>; filename=<name>". If name is None, 'filename' will
not be set. If disposition is None, no Content-Disposition header will
be written.
CAUTION: If the request contains a 'Range' header, one or more seek()s will
be performed on the file object. This may cause undesired behavior if
the file object is not seekable. It could also produce undesired results
if the caller set the read position of the file object prior to calling
serve_fileobj(), expecting that the data would be served starting from that
position.
"""
response = cherrypy.serving.response
try:
st = os.fstat(fileobj.fileno())
except AttributeError:
if debug:
cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
content_length = None
else:
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
cptools.validate_since()
content_length = st.st_size
if content_type is not None:
response.headers['Content-Type'] = content_type
if debug:
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
cd = None
if disposition is not None:
if name is None:
cd = disposition
else:
cd = '%s; filename="%s"' % (disposition, name)
response.headers["Content-Disposition"] = cd
if debug:
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def _serve_fileobj(fileobj, content_type, content_length, debug=False):
"""Internal. Set response.body to the given file object, perhaps ranged."""
response = cherrypy.serving.response
# HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
request = cherrypy.serving.request
if request.protocol >= (1, 1):
response.headers["Accept-Ranges"] = "bytes"
r = httputil.get_ranges(request.headers.get('Range'), content_length)
if r == []:
response.headers['Content-Range'] = "bytes */%s" % content_length
message = "Invalid Range (first-byte-pos greater than Content-Length)"
if debug:
cherrypy.log(message, 'TOOLS.STATIC')
raise cherrypy.HTTPError(416, message)
if r:
if len(r) == 1:
# Return a single-part response.
start, stop = r[0]
if stop > content_length:
stop = content_length
r_len = stop - start
if debug:
cherrypy.log('Single part; start: %r, stop: %r' % (start, stop),
'TOOLS.STATIC')
response.status = "206 Partial Content"
response.headers['Content-Range'] = (
"bytes %s-%s/%s" % (start, stop - 1, content_length))
response.headers['Content-Length'] = r_len
fileobj.seek(start)
response.body = file_generator_limited(fileobj, r_len)
else:
# Return a multipart/byteranges response.
response.status = "206 Partial Content"
import mimetools
boundary = mimetools.choose_boundary()
ct = "multipart/byteranges; boundary=%s" % boundary
response.headers['Content-Type'] = ct
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
def file_ranges():
# Apache compatibility:
yield "\r\n"
for start, stop in r:
if debug:
cherrypy.log('Multipart; start: %r, stop: %r' % (start, stop),
'TOOLS.STATIC')
yield "--" + boundary
yield "\r\nContent-type: %s" % content_type
yield ("\r\nContent-range: bytes %s-%s/%s\r\n\r\n"
% (start, stop - 1, content_length))
fileobj.seek(start)
for chunk in file_generator_limited(fileobj, stop - start):
yield chunk
yield "\r\n"
# Final boundary
yield "--" + boundary + "--"
# Apache compatibility:
yield "\r\n"
response.body = file_ranges()
return response.body
else:
if debug:
cherrypy.log('No byteranges requested', 'TOOLS.STATIC')
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
response.headers['Content-Length'] = content_length
response.body = fileobj
return response.body
def serve_download(path, name=None):
"""Serve 'path' as an application/x-download attachment."""
# This is such a common idiom I felt it deserved its own wrapper.
return serve_file(path, "application/x-download", "attachment", name)
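# Added hedged example (hypothetical path): a handler could simply do
#   return serve_download('/srv/files/build.zip', name='build.zip')
# which serves the file as an attachment with Content-Type application/x-download.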
def _attempt(filename, content_types, debug=False):
if debug:
cherrypy.log('Attempting %r (content_types %r)' %
(filename, content_types), 'TOOLS.STATICDIR')
try:
# you can set the content types for a
# complete directory per extension
content_type = None
if content_types:
r, ext = os.path.splitext(filename)
content_type = content_types.get(ext[1:], None)
serve_file(filename, content_type=content_type, debug=debug)
return True
except cherrypy.NotFound:
# If we didn't find the static file, continue handling the
# request. We might find a dynamic handler instead.
if debug:
cherrypy.log('NotFound', 'TOOLS.STATICFILE')
return False
def staticdir(section, dir, root="", match="", content_types=None, index="",
debug=False):
"""Serve a static resource from the given (root +) dir.
If 'match' is given, request.path_info will be searched for the given
regular expression before attempting to serve static content.
If content_types is given, it should be a Python dictionary of
{file-extension: content-type} pairs, where 'file-extension' is
a string (e.g. "gif") and 'content-type' is the value to write
out in the Content-Type response header (e.g. "image/gif").
If 'index' is provided, it should be the (relative) name of a file to
serve for directory requests. For example, if the dir argument is
'/home/me', the Request-URI is 'myapp', and the index arg is
'index.html', the file '/home/me/myapp/index.html' will be sought.
"""
request = cherrypy.serving.request
if request.method not in ('GET', 'HEAD'):
if debug:
cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
return False
if match and not re.search(match, request.path_info):
if debug:
cherrypy.log('request.path_info %r does not match pattern %r' %
(request.path_info, match), 'TOOLS.STATICDIR')
return False
# Allow the use of '~' to refer to a user's home directory.
dir = os.path.expanduser(dir)
# If dir is relative, make absolute using "root".
if not os.path.isabs(dir):
if not root:
msg = "Static dir requires an absolute dir (or root)."
if debug:
cherrypy.log(msg, 'TOOLS.STATICDIR')
raise ValueError(msg)
dir = os.path.join(root, dir)
# Determine where we are in the object tree relative to 'section'
# (where the static tool was defined).
if section == 'global':
section = "/"
section = section.rstrip(r"\/")
branch = request.path_info[len(section) + 1:]
branch = unquote(branch.lstrip(r"\/"))
# If branch is "", filename will end in a slash
filename = os.path.join(dir, branch)
if debug:
cherrypy.log('Checking file %r to fulfill %r' %
(filename, request.path_info), 'TOOLS.STATICDIR')
# There's a chance that the branch pulled from the URL might
# have ".." or similar uplevel attacks in it. Check that the final
# filename is a child of dir.
if not os.path.normpath(filename).startswith(os.path.normpath(dir)):
raise cherrypy.HTTPError(403) # Forbidden
handled = _attempt(filename, content_types)
if not handled:
# Check for an index file if a folder was requested.
if index:
handled = _attempt(os.path.join(filename, index), content_types)
if handled:
request.is_index = filename[-1] in (r"\/")
return handled
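# Hedged configuration sketch (editor's addition): how the staticdir tool is
# typically switched on from application config. The paths and URL section are
# hypothetical; the 'tools.staticdir.*' keys map onto staticdir()'s parameters.
_EXAMPLE_STATICDIR_CONF = {
    '/static': {
        'tools.staticdir.on': True,
        'tools.staticdir.root': '/home/me',
        'tools.staticdir.dir': 'static',
        'tools.staticdir.index': 'index.html',
        'tools.staticdir.content_types': {'svg': 'image/svg+xml'},
    },
}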
def staticfile(filename, root=None, match="", content_types=None, debug=False):
"""Serve a static resource from the given (root +) filename.
If 'match' is given, request.path_info will be searched for the given
regular expression before attempting to serve static content.
If content_types is given, it should be a Python dictionary of
{file-extension: content-type} pairs, where 'file-extension' is
a string (e.g. "gif") and 'content-type' is the value to write
out in the Content-Type response header (e.g. "image/gif").
"""
request = cherrypy.serving.request
if request.method not in ('GET', 'HEAD'):
if debug:
cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
return False
if match and not re.search(match, request.path_info):
if debug:
cherrypy.log('request.path_info %r does not match pattern %r' %
(request.path_info, match), 'TOOLS.STATICFILE')
return False
# If filename is relative, make absolute using "root".
if not os.path.isabs(filename):
if not root:
msg = "Static tool requires an absolute filename (got '%s')." % filename
if debug:
cherrypy.log(msg, 'TOOLS.STATICFILE')
raise ValueError(msg)
filename = os.path.join(root, filename)
return _attempt(filename, content_types, debug=debug)
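# Hedged configuration sketch (editor's addition) for the staticfile tool; the
# URL and filename below are hypothetical, and the 'tools.staticfile.*' keys
# map onto staticfile()'s parameters.
_EXAMPLE_STATICFILE_CONF = {
    '/favicon.ico': {
        'tools.staticfile.on': True,
        'tools.staticfile.filename': '/home/me/static/favicon.ico',
    },
}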
| gpl-3.0 |
havt/odoo | addons/account/product.py | 374 | 2897 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_category(osv.osv):
_inherit = "product.category"
_columns = {
'property_account_income_categ': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used for invoices to value sales."),
'property_account_expense_categ': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used for invoices to value expenses."),
}
#----------------------------------------------------------
# Products
#----------------------------------------------------------
class product_template(osv.osv):
_inherit = "product.template"
_columns = {
'taxes_id': fields.many2many('account.tax', 'product_taxes_rel',
'prod_id', 'tax_id', 'Customer Taxes',
domain=[('parent_id','=',False),('type_tax_use','in',['sale','all'])]),
'supplier_taxes_id': fields.many2many('account.tax',
'product_supplier_taxes_rel', 'prod_id', 'tax_id',
'Supplier Taxes', domain=[('parent_id', '=', False),('type_tax_use','in',['purchase','all'])]),
'property_account_income': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used for invoices instead of the default one to value sales for the current product."),
'property_account_expense': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used for invoices instead of the default one to value expenses for the current product."),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
open-io/oio-swift | tests/unit/controllers/test_container.py | 1 | 5114 | import unittest
from mock import patch
from mock import MagicMock as Mock
from oioswift.common.ring import FakeRing
from oioswift import server as proxy_server
from swift.common.swob import Request
from swift.proxy.controllers.base import headers_to_container_info
from swift.common.request_helpers import get_sys_meta_prefix
from tests.unit import FakeStorageAPI, debug_logger
class TestContainerController(unittest.TestCase):
def setUp(self):
self.logger = debug_logger('proxy-server')
self.storage = FakeStorageAPI(logger=self.logger)
self.storage.account.account_show = Mock(return_value={
'containers': 0,
'objects': 0,
'bytes': 0,
'ctime': 0,
'metadata': {}})
self.account_info = {
'status': 200,
'container_count': '10',
'total_object_count': '100',
'bytes': '1000',
'meta': {},
'sysmeta': {}
}
self.app = proxy_server.Application(
{'sds_namespace': 'NS'}, account_ring=FakeRing(),
container_ring=FakeRing(), storage=self.storage,
logger=self.logger)
class FakeAccountInfoContainerController(
proxy_server.ContainerController):
def account_info(controller, *args, **kwargs):
patch_path = 'swift.proxy.controllers.base.get_info'
with patch(patch_path) as mock_get_info:
mock_get_info.return_value = dict(self.account_info)
return super(FakeAccountInfoContainerController,
controller).account_info(
*args, **kwargs)
_orig_get_controller = self.app.get_controller
def wrapped_get_controller(*args, **kwargs):
with patch('swift.proxy.server.ContainerController',
new=FakeAccountInfoContainerController):
return _orig_get_controller(*args, **kwargs)
self.app.get_controller = wrapped_get_controller
def test_container_info(self):
req = Request.blank('/v1/a/c', {'PATH_INFO': '/v1/a/c'}, method='HEAD')
self.storage.container.container_show = Mock(return_value={})
resp = req.get_response(self.app)
self.assertEqual(2, resp.status_int // 100)
self.assertIn('swift.infocache', resp.environ)
self.assertIn('container/a/c', resp.environ['swift.infocache'])
self.assertEqual(
headers_to_container_info(resp.headers, resp.status_int),
resp.environ['swift.infocache']['container/a/c'])
def test_swift_owner(self):
owner_headers = {'properties': {
'x-container-read': 'value', 'x-container-write': 'value',
'x-container-sync-key': 'value', 'x-container-sync-to': 'value'}}
req = Request.blank('/v1/a/c', method='HEAD')
self.storage.container.container_get_properties = Mock(
return_value=owner_headers)
resp = req.get_response(self.app)
self.assertEqual(2, resp.status_int // 100)
for k in owner_headers['properties']:
self.assertTrue(k not in resp.headers)
req = Request.blank(
'/v1/a/c', environ={'swift_owner': True}, method='HEAD')
self.storage.container.container_get_properties = Mock(
return_value=owner_headers)
resp = req.get_response(self.app)
self.assertEqual(2, resp.status_int // 100)
for k in owner_headers['properties']:
self.assertIn(k, resp.headers)
def test_sys_meta_headers_PUT(self):
sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Container-Meta-Test'
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a/c', headers=hdrs_in, method='PUT')
self.storage.container.container_create = Mock()
req.get_response(self.app)
meta = \
self.storage.container.container_create.call_args[1]['properties']
self.assertEqual(meta[sys_meta_key], 'foo')
self.assertEqual(meta[user_meta_key], 'bar')
def test_sys_meta_headers_POST(self):
# check that headers in sys meta namespace make it through
# the container controller
sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Container-Meta-Test'
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a/c', headers=hdrs_in, method='POST')
self.storage.container.container_set_properties = Mock(
return_value="")
req.get_response(self.app)
meta = self.storage.container.container_set_properties.call_args[0][2]
self.assertEqual(meta[sys_meta_key], 'foo')
self.assertEqual(meta[user_meta_key], 'bar')
| apache-2.0 |
libcrosswind/libcrosswind | platform/windows/compilers/x64/TDM-GCC-64/gdb64/bin/lib/idlelib/idle_test/test_calltips.py | 27 | 7140 | import unittest
import idlelib.CallTips as ct
CTi = ct.CallTips() # needed for get_entity test in 2.7
import textwrap
import types
import warnings
default_tip = ''
# Test Class TC is used in multiple get_argspec test methods
class TC(object):
'doc'
tip = "(ai=None, *args)"
def __init__(self, ai=None, *b): 'doc'
__init__.tip = "(self, ai=None, *args)"
def t1(self): 'doc'
t1.tip = "(self)"
def t2(self, ai, b=None): 'doc'
t2.tip = "(self, ai, b=None)"
def t3(self, ai, *args): 'doc'
t3.tip = "(self, ai, *args)"
def t4(self, *args): 'doc'
t4.tip = "(self, *args)"
def t5(self, ai, b=None, *args, **kw): 'doc'
t5.tip = "(self, ai, b=None, *args, **kwargs)"
def t6(no, self): 'doc'
t6.tip = "(no, self)"
def __call__(self, ci): 'doc'
__call__.tip = "(self, ci)"
# attaching .tip to wrapped methods does not work
@classmethod
def cm(cls, a): 'doc'
@staticmethod
def sm(b): 'doc'
tc = TC()
signature = ct.get_arg_text # 2.7 and 3.x use different functions
class Get_signatureTest(unittest.TestCase):
# The signature function must return a string, even if blank.
# Test a variety of objects to be sure that none cause it to raise
# (quite aside from getting as correct an answer as possible).
# The tests of builtins may break if the docstrings change,
# but a red buildbot is better than a user crash (as has happened).
# For a simple mismatch, change the expected output to the actual.
def test_builtins(self):
# 2.7 puts '()\n' where 3.x does not, other minor differences
# Python class that inherits builtin methods
class List(list): "List() doc"
# Simulate builtin with no docstring for default argspec test
class SB: __call__ = None
def gtest(obj, out):
self.assertEqual(signature(obj), out)
if List.__doc__ is not None:
gtest(List, '()\n' + List.__doc__)
gtest(list.__new__,
'T.__new__(S, ...) -> a new object with type S, a subtype of T')
gtest(list.__init__,
'x.__init__(...) initializes x; see help(type(x)) for signature')
append_doc = "L.append(object) -- append object to end"
gtest(list.append, append_doc)
gtest([].append, append_doc)
gtest(List.append, append_doc)
gtest(types.MethodType, '()\ninstancemethod(function, instance, class)')
gtest(SB(), default_tip)
def test_signature_wrap(self):
# This is also a test of an old-style class
if textwrap.TextWrapper.__doc__ is not None:
self.assertEqual(signature(textwrap.TextWrapper), '''\
(width=70, initial_indent='', subsequent_indent='', expand_tabs=True,
replace_whitespace=True, fix_sentence_endings=False, break_long_words=True,
drop_whitespace=True, break_on_hyphens=True)''')
def test_docline_truncation(self):
def f(): pass
f.__doc__ = 'a'*300
self.assertEqual(signature(f), '()\n' + 'a' * (ct._MAX_COLS-3) + '...')
def test_multiline_docstring(self):
# Test fewer lines than max.
self.assertEqual(signature(list),
"()\nlist() -> new empty list\n"
"list(iterable) -> new list initialized from iterable's items")
# Test max lines and line (currently) too long.
def f():
pass
s = 'a\nb\nc\nd\n'
f.__doc__ = s + 300 * 'e' + 'f'
self.assertEqual(signature(f),
'()\n' + s + (ct._MAX_COLS - 3) * 'e' + '...')
def test_functions(self):
def t1(): 'doc'
t1.tip = "()"
def t2(a, b=None): 'doc'
t2.tip = "(a, b=None)"
def t3(a, *args): 'doc'
t3.tip = "(a, *args)"
def t4(*args): 'doc'
t4.tip = "(*args)"
def t5(a, b=None, *args, **kwds): 'doc'
t5.tip = "(a, b=None, *args, **kwargs)"
doc = '\ndoc' if t1.__doc__ is not None else ''
for func in (t1, t2, t3, t4, t5, TC):
self.assertEqual(signature(func), func.tip + doc)
def test_methods(self):
doc = '\ndoc' if TC.__doc__ is not None else ''
for meth in (TC.t1, TC.t2, TC.t3, TC.t4, TC.t5, TC.t6, TC.__call__):
self.assertEqual(signature(meth), meth.tip + doc)
self.assertEqual(signature(TC.cm), "(a)" + doc)
self.assertEqual(signature(TC.sm), "(b)" + doc)
def test_bound_methods(self):
# test that first parameter is correctly removed from argspec
doc = '\ndoc' if TC.__doc__ is not None else ''
for meth, mtip in ((tc.t1, "()"), (tc.t4, "(*args)"), (tc.t6, "(self)"),
(tc.__call__, '(ci)'), (tc, '(ci)'), (TC.cm, "(a)"),):
self.assertEqual(signature(meth), mtip + doc)
def test_starred_parameter(self):
# test that starred first parameter is *not* removed from argspec
class C:
def m1(*args): pass
def m2(**kwds): pass
def f1(args, kwargs, *a, **k): pass
def f2(args, kwargs, args1, kwargs1, *a, **k): pass
c = C()
self.assertEqual(signature(C.m1), '(*args)')
self.assertEqual(signature(c.m1), '(*args)')
self.assertEqual(signature(C.m2), '(**kwargs)')
self.assertEqual(signature(c.m2), '(**kwargs)')
self.assertEqual(signature(f1), '(args, kwargs, *args1, **kwargs1)')
self.assertEqual(signature(f2),
'(args, kwargs, args1, kwargs1, *args2, **kwargs2)')
def test_no_docstring(self):
def nd(s): pass
TC.nd = nd
self.assertEqual(signature(nd), "(s)")
self.assertEqual(signature(TC.nd), "(s)")
self.assertEqual(signature(tc.nd), "()")
def test_attribute_exception(self):
class NoCall(object):
def __getattr__(self, name):
raise BaseException
class Call(NoCall):
def __call__(self, ci):
pass
for meth, mtip in ((NoCall, '()'), (Call, '()'),
(NoCall(), ''), (Call(), '(ci)')):
self.assertEqual(signature(meth), mtip)
def test_non_callables(self):
for obj in (0, 0.0, '0', b'0', [], {}):
self.assertEqual(signature(obj), '')
class Get_entityTest(unittest.TestCase):
# In 3.x, get_entity changed from 'instance method' to module function
    # since 'self' is not used. Use a dummy instance until 2.7 is changed too.
def test_bad_entity(self):
self.assertIsNone(CTi.get_entity('1//0'))
def test_good_entity(self):
self.assertIs(CTi.get_entity('int'), int)
class Py2Test(unittest.TestCase):
def test_paramtuple_float(self):
# 18539: (a,b) becomes '.0' in code object; change that but not 0.0
with warnings.catch_warnings():
            # Suppress the py3 deprecation message about parameter unpacking
warnings.simplefilter("ignore")
exec "def f((a,b), c=0.0): pass"
self.assertEqual(signature(f), '(<tuple>, c=0.0)')
if __name__ == '__main__':
unittest.main(verbosity=2, exit=False)
| gpl-3.0 |
mogoweb/chromium-crosswalk | remoting/tools/verify_resources.py | 25 | 5148 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verifies that GRD resource files define all the strings used by a given
set of source files. For file formats where it is not possible to infer which
strings represent message identifiers, localized strings should be explicitly
annotated with the string "i18n-content", for example:
LocalizeString(/*i18n-content*/"PRODUCT_NAME");
This script also recognises localized strings in HTML and manifest.json files:
HTML: i18n-content="PRODUCT_NAME"
or i18n-value-name-1="BUTTON_NAME"
or i18n-title="TOOLTIP_NAME"
manifest.json: __MSG_PRODUCT_NAME__
Note that these forms must be exact; extra spaces are not permitted, though
either single or double quotes are recognized.
In addition, the script checks that all the messages are still in use; if
this is not the case then a warning is issued, but the script still succeeds.
"""
import json
import os
import optparse
import re
import sys
import xml.dom.minidom as minidom
WARNING_MESSAGE = """
To remove this warning, either remove the unused tags from
resource files, add the files that use the tags listed above to
remoting.gyp, or annotate existing uses of those tags with the
prefix /*i18n-content*/
"""
def LoadTagsFromGrd(filename):
xml = minidom.parse(filename)
tags = []
msgs_and_structs = xml.getElementsByTagName("message")
msgs_and_structs.extend(xml.getElementsByTagName("structure"))
for res in msgs_and_structs:
name = res.getAttribute("name")
if not name or not name.startswith("IDR_"):
raise Exception("Tag name doesn't start with IDR_: %s" % name)
tags.append(name[4:])
return tags
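# Hedged illustration (editor's addition, not part of the original script):
# a GRD <message> element such as
# <message name="IDR_PRODUCT_NAME">...</message> yields the tag
# "PRODUCT_NAME" once the IDR_ prefix is stripped. The GRD snippet below is
# hypothetical and trimmed to the elements this script inspects.
def _ExampleLoadTags():
  grd = ('<grit><release><messages>'
         '<message name="IDR_PRODUCT_NAME">Product</message>'
         '</messages></release></grit>')
  doc = minidom.parseString(grd)
  return [m.getAttribute("name")[4:]
          for m in doc.getElementsByTagName("message")]  # ['PRODUCT_NAME']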
def ExtractTagFromLine(file_type, line):
"""Extract a tag from a line of HTML, C++, JS or JSON."""
if file_type == "html":
# HTML-style (tags)
m = re.search('i18n-content=[\'"]([^\'"]*)[\'"]', line)
if m: return m.group(1)
# HTML-style (titles)
m = re.search('i18n-title=[\'"]([^\'"]*)[\'"]', line)
if m: return m.group(1)
# HTML-style (substitutions)
m = re.search('i18n-value-name-[1-9]=[\'"]([^\'"]*)[\'"]', line)
if m: return m.group(1)
elif file_type == 'js':
# Javascript style
m = re.search('/\*i18n-content\*/[\'"]([^\`"]*)[\'"]', line)
if m: return m.group(1)
elif file_type == 'cc' or file_type == 'mm':
# C++ style
m = re.search('IDR_([A-Z0-9_]*)', line)
if m: return m.group(1)
m = re.search('/\*i18n-content\*/["]([^\`"]*)["]', line)
if m: return m.group(1)
elif file_type == 'json':
# Manifest style
m = re.search('__MSG_(.*)__', line)
if m: return m.group(1)
elif file_type == 'jinja2':
# Jinja2 template file
m = re.search('\{\%\s+trans\s+\%\}([A-Z0-9_]+)\{\%\s+endtrans\s+\%\}', line)
if m: return m.group(1)
return None
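# Hedged examples (editor's addition): expected extractions for a few of the
# annotation forms described in the module docstring. The surrounding text in
# each sample line is hypothetical.
def _ExampleExtractions():
  assert ExtractTagFromLine('html', '<div i18n-content="PRODUCT_NAME">') == \
      'PRODUCT_NAME'
  assert ExtractTagFromLine('json', '"name": "__MSG_PRODUCT_NAME__",') == \
      'PRODUCT_NAME'
  assert ExtractTagFromLine('cc', 'LoadResource(IDR_PRODUCT_NAME);') == \
      'PRODUCT_NAME'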
def VerifyFile(filename, messages, used_tags):
"""
Parse |filename|, looking for tags and report any that are not included in
|messages|. Return True if all tags are present and correct, or False if
any are missing. If no tags are found, print a warning message and return
True.
"""
base_name, extension = os.path.splitext(filename)
extension = extension[1:]
if extension not in ['js', 'cc', 'html', 'json', 'jinja2', 'mm']:
raise Exception("Unknown file type: %s" % extension)
result = True
matches = False
f = open(filename, 'r')
lines = f.readlines()
for i in xrange(0, len(lines)):
tag = ExtractTagFromLine(extension, lines[i])
if tag:
tag = tag.upper()
used_tags.add(tag)
matches = True
if not tag in messages:
result = False
print '%s/%s:%d: error: Undefined tag: %s' % \
(os.getcwd(), filename, i + 1, tag)
if not matches:
print '%s/%s:0: warning: No tags found' % (os.getcwd(), filename)
f.close()
return result
def main():
parser = optparse.OptionParser(
usage='Usage: %prog [options...] [source_file...]')
parser.add_option('-t', '--touch', dest='touch',
help='File to touch when finished.')
parser.add_option('-r', '--grd', dest='grd', action='append',
help='grd file')
options, args = parser.parse_args()
if not options.touch:
print '-t is not specified.'
return 1
if len(options.grd) == 0 or len(args) == 0:
print 'At least one GRD file needs to be specified.'
return 1
resources = []
for f in options.grd:
resources.extend(LoadTagsFromGrd(f))
used_tags = set([])
exit_code = 0
for f in args:
if not VerifyFile(f, resources, used_tags):
exit_code = 1
warnings = False
for tag in resources:
if tag not in used_tags:
print ('%s/%s:0: warning: %s is defined but not used') % \
(os.getcwd(), sys.argv[2], tag)
warnings = True
if warnings:
print WARNING_MESSAGE
if exit_code == 0:
f = open(options.touch, 'a')
f.close()
os.utime(options.touch, None)
return exit_code
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
hydroshare/hydroshare | theme/templatetags/comments_tags.py | 1 | 3173 |
from future.builtins import int
from collections import defaultdict
from django.core.urlresolvers import reverse
from django.template.defaultfilters import linebreaksbr, urlize
from mezzanine import template
from mezzanine.conf import settings
from theme.forms import ThreadedCommentForm
from mezzanine.generic.models import ThreadedComment
from mezzanine.utils.importing import import_dotted_path
register = template.Library()
@register.inclusion_tag("generic/includes/comments.html", takes_context=True)
def comments_for(context, obj):
"""
Provides a generic context variable name for the object that
comments are being rendered for.
"""
form = ThreadedCommentForm(context["request"], obj, auto_id=True)
try:
context["posted_comment_form"]
except KeyError:
context["posted_comment_form"] = form
context["unposted_comment_form"] = form
context["comment_url"] = reverse("comment")
context["object_for_comments"] = obj
return context.flatten()
@register.inclusion_tag("generic/includes/comment.html", takes_context=True)
def comment_thread(context, parent):
"""
Return a list of child comments for the given parent, storing all
comments in a dict in the context when first called, using parents
as keys for retrieval on subsequent recursive calls from the
comments template.
"""
if "all_comments" not in context:
comments = defaultdict(list)
if "request" in context and context["request"].user.is_staff:
comments_queryset = parent.comments.all()
else:
comments_queryset = parent.comments.visible()
for comment in comments_queryset.select_related("user"):
comments[comment.replied_to_id].append(comment)
context["all_comments"] = comments
parent_id = parent.id if isinstance(parent, ThreadedComment) else None
try:
replied_to = int(context["request"].POST["replied_to"])
except KeyError:
replied_to = 0
context.update({
"comments_for_thread": context["all_comments"].get(parent_id, []),
"no_comments": parent_id is None and not context["all_comments"],
"replied_to": replied_to,
})
return context.flatten()
@register.inclusion_tag("admin/includes/recent_comments.html", takes_context=True)
def recent_comments(context):
"""
Dashboard widget for displaying recent comments.
"""
latest = context["settings"].COMMENTS_NUM_LATEST
comments = ThreadedComment.objects.all().select_related("user")
context["comments"] = comments.order_by("-id")[:latest]
return context.flatten()
@register.filter
def comment_filter(comment_text):
"""
Passed comment text to be rendered through the function defined
by the ``COMMENT_FILTER`` setting. If no function is defined
(the default), Django's ``linebreaksbr`` filter is used.
"""
filter_func = settings.COMMENT_FILTER
if not filter_func:
def filter_func(s):
return linebreaksbr(s, autoescape=True)
elif not callable(filter_func):
filter_func = import_dotted_path(filter_func)
return filter_func(comment_text)
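# Hedged sketch (editor's addition): COMMENT_FILTER may point at any callable
# via a dotted path, e.g. COMMENT_FILTER = "theme.utils.render_comment" (a
# hypothetical module path). The callable receives the raw comment text and
# returns HTML, for example:
def _example_comment_filter(comment_text):
    # Linkify URLs first, then convert newlines to <br> tags.
    return linebreaksbr(urlize(comment_text), autoescape=True)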
| bsd-3-clause |
gilbertpilz/solum | solum/tests/objects/test_pipeline.py | 2 | 2831 | # Copyright 2014 - Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from solum.objects import registry
from solum.objects.sqlalchemy import execution
from solum.objects.sqlalchemy import pipeline
from solum.tests import base
from solum.tests import utils
class TestPipeline(base.BaseTestCase):
def setUp(self):
super(TestPipeline, self).setUp()
self.db = self.useFixture(utils.Database())
self.ctx = utils.dummy_context()
self.data = [{'project_id': 'project_id_blah',
'uuid': 'ce43e347f0b0422825245b3e5f140a81cef6e65b',
'user_id': 'fred',
'name': 'pipeline1',
'description': 'test pipeline',
'trigger_id': 'trigger-uuid-1234',
'tags': 'pipeline tags',
'plan_id': 'plan_id_1'}]
utils.create_models_from_data(pipeline.Pipeline, self.data, self.ctx)
def test_objects_registered(self):
self.assertTrue(registry.Pipeline)
self.assertTrue(registry.PipelineList)
def test_get_all(self):
lst = pipeline.PipelineList()
self.assertEqual(1, len(lst.get_all(self.ctx)))
def test_check_data(self):
ta = pipeline.Pipeline().get_by_id(self.ctx, self.data[0]['id'])
for key, value in self.data[0].items():
self.assertEqual(value, getattr(ta, key))
def test_check_data_by_trigger_id(self):
ta = pipeline.Pipeline().get_by_trigger_id(self.ctx, self.data[0][
'trigger_id'])
for key, value in self.data[0].items():
self.assertEqual(value, getattr(ta, key))
def test_last_execution(self):
ta = pipeline.Pipeline().get_by_id(self.ctx, self.data[0]['id'])
# add executions
ex1 = execution.Execution()
ex1.uuid = 'first'
ex1.pipeline_id = ta.id
ex1.create(self.ctx)
ex2 = execution.Execution()
ex2.uuid = 'second'
ex2.pipeline_id = ta.id
ex2.create(self.ctx)
extest = ta.last_execution()
self.assertEqual('second', extest.uuid)
def test_last_execution_none(self):
ta = pipeline.Pipeline().get_by_id(self.ctx, self.data[0]['id'])
extest = ta.last_execution()
self.assertIsNone(extest)
| apache-2.0 |
PLyczkowski/Sticky-Keymap | 2.74/python/lib/tarfile.py | 80 | 90965 | #!/usr/bin/env python3
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel ([email protected])"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import io
import shutil
import stat
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# OSError (winerror=1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (OSError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"}
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
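# Hedged worked example (editor's addition): stn() pads or truncates to the
# field length and nts() strips at the first NUL, so the two round-trip for
# strings that fit the field. The helper below is illustrative only.
def _example_string_field():
    field = stn("foo", 8, "utf-8", "surrogateescape")
    assert field == b"foo\x00\x00\x00\x00\x00"
    assert nts(field, "utf-8", "surrogateescape") == "foo"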
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] in (0o200, 0o377):
n = 0
for i in range(len(s) - 1):
n <<= 8
n += s[i + 1]
if s[0] == 0o377:
n = -(256 ** (len(s) - 1) - n)
else:
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
    # octal digits followed by a null-byte; this allows values up to
    # (8**(digits-1))-1. GNU tar allows storing numbers greater than
    # that if necessary. A leading 0o200 or 0o377 byte indicates this
    # particular encoding; the following digits-1 bytes are a big-endian
# base-256 representation. This allows values up to (256**(digits-1))-1.
# A 0o200 byte indicates a positive number, a 0o377 byte a negative
# number.
if 0 <= n < 8 ** (digits - 1):
s = bytes("%0*o" % (digits - 1, int(n)), "ascii") + NUL
elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1):
if n >= 0:
s = bytearray([0o200])
else:
s = bytearray([0o377])
n = 256 ** digits + n
for i in range(digits - 1):
s.insert(1, n & 0o377)
n >>= 8
else:
raise ValueError("overflow in number field")
return s
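# Hedged worked example (editor's addition): small values are stored as
# NUL-terminated octal text, while values too large for the field fall back
# to the GNU base-256 encoding (leading 0o200 byte); nti() reverses both
# forms. The helper below is illustrative only.
def _example_number_field():
    assert itn(1000) == b"0001750\x00"  # 1000 == 0o1750
    assert nti(itn(1000)) == 1000
    big = 8 ** 11  # too large for a 12-digit octal field
    assert itn(big, 12, GNU_FORMAT)[0] == 0o200
    assert nti(itn(big, 12, GNU_FORMAT)) == big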
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf))
signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise OSError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise OSError("end of file reached")
dst.write(buf)
return
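# Hedged worked example (editor's addition): copy exactly five bytes between
# two in-memory file objects.
def _example_copyfileobj():
    src, dst = io.BytesIO(b"0123456789"), io.BytesIO()
    copyfileobj(src, dst, 5)
    assert dst.getvalue() == b"01234"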
def filemode(mode):
"""Deprecated in this location; use stat.filemode."""
import warnings
warnings.warn("deprecated in favor of stat.filemode",
DeprecationWarning, 2)
return stat.filemode(mode)
def _safe_print(s):
encoding = getattr(sys.stdout, 'encoding', None)
if encoding is not None:
s = s.encode(encoding, 'backslashreplace').decode(encoding)
print(s, end=' ')
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadable tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
self.exception = zlib.error
else:
self._init_write_gz()
elif comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
self.exception = OSError
else:
self.cmp = bz2.BZ2Compressor()
elif comptype == "xz":
try:
import lzma
except ImportError:
raise CompressionError("lzma module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = lzma.LZMADecompressor()
self.exception = lzma.LZMAError
else:
self.cmp = lzma.LZMACompressor()
elif comptype != "tar":
raise CompressionError("unknown compression type %r" % comptype)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except self.exception:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
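# Hedged usage sketch (editor's addition): _Stream backs the non-seekable
# "r|*", "r|gz", "w|bz2" etc. modes of the module-level open(). A typical
# streaming read, e.g. from sys.stdin.buffer or a socket file, looks roughly
# like the helper below (illustrative only).
def _example_stream_read(fileobj):
    import tarfile  # self-import; resolves to this module when called
    with tarfile.open(fileobj=fileobj, mode="r|*") as tar:
        for tarinfo in tar:
            if tarinfo.isreg():
                tar.extractfile(tarinfo).read()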
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\x1f\x8b\x08"):
return "gz"
elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY":
return "bz2"
elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")):
return "xz"
else:
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
self.name = getattr(fileobj, "name", None)
self.closed = False
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
def flush(self):
pass
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position, whence=io.SEEK_SET):
"""Seek to a position in the file.
"""
if whence == io.SEEK_SET:
self.position = min(max(position, 0), self.size)
elif whence == io.SEEK_CUR:
if position < 0:
self.position = max(self.position + position, 0)
else:
self.position = min(self.position + position, self.size)
elif whence == io.SEEK_END:
self.position = max(min(self.size + position, self.size), 0)
else:
raise ValueError("Invalid argument")
return self.position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
def readinto(self, b):
buf = self.read(len(b))
b[:len(buf)] = buf
return len(buf)
def close(self):
self.closed = True
#class _FileInFile
class ExFileObject(io.BufferedReader):
def __init__(self, tarfile, tarinfo):
fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data,
tarinfo.size, tarinfo.sparse)
super().__init__(fileobj)
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
# that like to be stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf-8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf-8")
if binary:
# Try to restore the original byte representation of `value'.
                # Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf-8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
        # Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
# these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
# implementations to store them as raw binary strings if the
# translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf-8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf-8"
# Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
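# Illustrative example (not part of the original module): the record
# b"30 mtime=1350244286.017756222\n" is exactly 30 bytes long, counting the
# leading "30 " and the trailing newline.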
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf-8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf-8", "utf-8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf-8", "utf-8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
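# Illustrative example (not part of the original module): a map value of
# "0,512,10240,512" yields sparse == [0, 512, 10240, 512] and therefore
# next.sparse == [(0, 512), (10240, 512)], i.e. (offset, numbytes) pairs.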
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The file-object for extractfile().
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
modes = {"r": "rb", "a": "r+b", "w": "wb"}
if mode not in modes:
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = modes[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if (name is None and hasattr(fileobj, "name") and
isinstance(fileobj.name, (str, bytes))):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'r:xz' open for reading with lzma compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'w:xz' open for writing with lzma compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'r|xz' open an lzma compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
'w|xz' open an lzma compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in ("a", "w"):
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if mode not in ("r", "a", "w"):
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
except OSError:
if fileobj is not None and mode == 'r':
raise ReadError("not a gzip file")
raise
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except OSError:
fileobj.close()
if mode == 'r':
raise ReadError("not a gzip file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
fileobj = bz2.BZ2File(fileobj or name, mode,
compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (OSError, EOFError):
fileobj.close()
if mode == 'r':
raise ReadError("not a bzip2 file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
@classmethod
def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs):
"""Open lzma compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if mode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
try:
import lzma
except ImportError:
raise CompressionError("lzma module is not available")
fileobj = lzma.LZMAFile(fileobj or name, mode, preset=preset)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (lzma.LZMAError, EOFError):
fileobj.close()
if mode == 'r':
raise ReadError("not an lzma file")
raise
except:
fileobj.close()
raise
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open", # bzip2 compressed tar
"xz": "xzopen" # lzma compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backslashes are converted to forward slashes, and
# absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
_safe_print(stat.filemode(tarinfo.mode))
_safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid))
if tarinfo.ischr() or tarinfo.isblk():
_safe_print("%10s" %
("%d,%d" % (tarinfo.devmajor, tarinfo.devminor)))
else:
_safe_print("%10d" % tarinfo.size)
_safe_print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6])
_safe_print(tarinfo.name + ("/" if tarinfo.isdir() else ""))
if verbose:
if tarinfo.issym():
_safe_print("-> " + tarinfo.linkname)
if tarinfo.islnk():
_safe_print("link to " + tarinfo.linkname)
print()
def add(self, name, arcname=None, recursive=True, exclude=None, *, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
with bltn_open(name, "rb") as f:
self.addfile(tarinfo, f)
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid problems caused by newline translation affecting the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
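# Illustrative sketch (not part of the original module): adding in-memory data
# by constructing a TarInfo by hand; "tf" is assumed to be a TarFile opened
# for writing and the member name is hypothetical.
#
#     import io, time
#     payload = b"hello world\n"
#     ti = TarInfo(name="greeting.txt")
#     ti.size = len(payload)
#     ti.mtime = time.time()
#     tf.addfile(ti, io.BytesIO(payload))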
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
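# Illustrative sketch (not part of the original module): extracting only a
# subset of members into a target directory; paths are hypothetical.
#
#     with TarFile.open("backup.tar.gz", "r:*") as tf:
#         wanted = [m for m in tf.getmembers() if m.name.startswith("docs/")]
#         tf.extractall(path="restore", members=wanted)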
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except OSError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file or a
link, an io.BufferedReader object is returned. Otherwise, None is
returned.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
# Members with unknown types are treated as regular files.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except FileExistsError:
pass
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
with bltn_open(targetpath, "wb") as target:
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except symlink_exception:
try:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
os.chown(targetpath, u, g)
except OSError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except OSError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except OSError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there are no more
members available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise OSError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise OSError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname)))
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter:
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if self.index == 0 and self.tarfile.firstmember is not None:
tarinfo = self.tarfile.next()
elif self.index < len(self.tarfile.members):
tarinfo = self.tarfile.members[self.index]
elif not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
raise StopIteration
self.index += 1
return tarinfo
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
def main():
import argparse
description = 'A simple command line interface for tarfile module.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Verbose output')
group = parser.add_mutually_exclusive_group()
group.add_argument('-l', '--list', metavar='<tarfile>',
help='Show listing of a tarfile')
group.add_argument('-e', '--extract', nargs='+',
metavar=('<tarfile>', '<output_dir>'),
help='Extract tarfile into target dir')
group.add_argument('-c', '--create', nargs='+',
metavar=('<name>', '<file>'),
help='Create tarfile from sources')
group.add_argument('-t', '--test', metavar='<tarfile>',
help='Test if a tarfile is valid')
args = parser.parse_args()
if args.test:
src = args.test
if is_tarfile(src):
with open(src, 'r') as tar:
tar.getmembers()
print(tar.getmembers(), file=sys.stderr)
if args.verbose:
print('{!r} is a tar archive.'.format(src))
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.list:
src = args.list
if is_tarfile(src):
with TarFile.open(src, 'r:*') as tf:
tf.list(verbose=args.verbose)
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.extract:
if len(args.extract) == 1:
src = args.extract[0]
curdir = os.curdir
elif len(args.extract) == 2:
src, curdir = args.extract
else:
parser.exit(1, parser.format_help())
if is_tarfile(src):
with TarFile.open(src, 'r:*') as tf:
tf.extractall(path=curdir)
if args.verbose:
if curdir == '.':
msg = '{!r} file is extracted.'.format(src)
else:
msg = ('{!r} file is extracted '
'into {!r} directory.').format(src, curdir)
print(msg)
else:
parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
elif args.create:
tar_name = args.create.pop(0)
_, ext = os.path.splitext(tar_name)
compressions = {
# gz
'gz': 'gz',
'tgz': 'gz',
# xz
'xz': 'xz',
'txz': 'xz',
# bz2
'bz2': 'bz2',
'tbz': 'bz2',
'tbz2': 'bz2',
'tb2': 'bz2',
}
# Note: os.path.splitext() keeps the leading dot, so strip it before the lookup.
tar_mode = 'w:' + compressions[ext[1:]] if ext[1:] in compressions else 'w'
tar_files = args.create
with TarFile.open(tar_name, tar_mode) as tf:
for file_name in tar_files:
tf.add(file_name)
if args.verbose:
print('{!r} file created.'.format(tar_name))
else:
parser.exit(1, parser.format_help())
if __name__ == '__main__':
main()
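# Illustrative command-line usage sketch (not part of the original module),
# assuming this file is importable as the module "tarfile":
#
#     python -m tarfile -l archive.tar          # list contents
#     python -m tarfile -e archive.tar outdir   # extract into outdir
#     python -m tarfile -c new.tar.gz src       # create an archive from src
#     python -m tarfile -t archive.tar -v       # test validity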
| gpl-2.0 |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/whoosh/query/terms.py | 39 | 17708 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
import copy
import fnmatch
import re
from collections import defaultdict
from whoosh import matching
from whoosh.analysis import Token
from whoosh.compat import bytes_type, text_type, u
from whoosh.lang.morph_en import variations
from whoosh.query import qcore
class Term(qcore.Query):
"""Matches documents containing the given term (fieldname+text pair).
>>> Term("content", u"render")
"""
__inittypes__ = dict(fieldname=str, text=text_type, boost=float)
def __init__(self, fieldname, text, boost=1.0, minquality=None):
self.fieldname = fieldname
self.text = text
self.boost = boost
self.minquality = minquality
def __eq__(self, other):
return (other
and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text
and self.boost == other.boost)
def __repr__(self):
r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
if self.boost != 1.0:
r += ", boost=%s" % self.boost
r += ")"
return r
def __unicode__(self):
text = self.text
if isinstance(text, bytes_type):
try:
text = text.decode("ascii")
except UnicodeDecodeError:
text = repr(text)
t = u("%s:%s") % (self.fieldname, text)
if self.boost != 1:
t += u("^") + text_type(self.boost)
return t
__str__ = __unicode__
def __hash__(self):
return hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
def has_terms(self):
return True
def tokens(self, boost=1.0):
yield Token(fieldname=self.fieldname, text=self.text,
boost=boost * self.boost, startchar=self.startchar,
endchar=self.endchar, chars=True)
def terms(self, phrases=False):
if self.field():
yield (self.field(), self.text)
def replace(self, fieldname, oldtext, newtext):
q = copy.copy(self)
if q.fieldname == fieldname and q.text == oldtext:
q.text = newtext
return q
def estimate_size(self, ixreader):
fieldname = self.fieldname
if fieldname not in ixreader.schema:
return 0
field = ixreader.schema[fieldname]
try:
text = field.to_bytes(self.text)
except ValueError:
return 0
return ixreader.doc_frequency(fieldname, text)
def matcher(self, searcher, context=None):
fieldname = self.fieldname
text = self.text
if fieldname not in searcher.schema:
return matching.NullMatcher()
field = searcher.schema[fieldname]
try:
text = field.to_bytes(text)
except ValueError:
return matching.NullMatcher()
if (self.fieldname, text) in searcher.reader():
if context is None:
w = searcher.weighting
else:
w = context.weighting
m = searcher.postings(self.fieldname, text, weighting=w)
if self.minquality:
m.set_min_quality(self.minquality)
if self.boost != 1.0:
m = matching.WrappingMatcher(m, boost=self.boost)
return m
else:
return matching.NullMatcher()
class MultiTerm(qcore.Query):
"""Abstract base class for queries that operate on multiple terms in the
same field.
"""
constantscore = False
def _btexts(self, ixreader):
raise NotImplementedError(self.__class__.__name__)
def expanded_terms(self, ixreader, phrases=False):
fieldname = self.field()
if fieldname:
for btext in self._btexts(ixreader):
yield (fieldname, btext)
def tokens(self, boost=1.0, exreader=None):
fieldname = self.field()
if exreader is None:
btexts = [self.text]
else:
btexts = self._btexts(exreader)
for btext in btexts:
yield Token(fieldname=fieldname, text=btext,
boost=boost * self.boost, startchar=self.startchar,
endchar=self.endchar, chars=True)
def simplify(self, ixreader):
fieldname = self.field()
if fieldname not in ixreader.schema:
return qcore.NullQuery()
field = ixreader.schema[fieldname]
existing = []
for btext in sorted(set(self._btexts(ixreader))):
text = field.from_bytes(btext)
existing.append(Term(fieldname, text, boost=self.boost))
if len(existing) == 1:
return existing[0]
elif existing:
from whoosh.query import Or
return Or(existing)
else:
return qcore.NullQuery
def estimate_size(self, ixreader):
fieldname = self.field()
return sum(ixreader.doc_frequency(fieldname, btext)
for btext in self._btexts(ixreader))
def estimate_min_size(self, ixreader):
fieldname = self.field()
return min(ixreader.doc_frequency(fieldname, text)
for text in self._btexts(ixreader))
def matcher(self, searcher, context=None):
from whoosh.query import Or
fieldname = self.field()
constantscore = self.constantscore
reader = searcher.reader()
qs = [Term(fieldname, word) for word in self._btexts(reader)]
if not qs:
return matching.NullMatcher()
if len(qs) == 1:
# If there's only one term, just use it
m = qs[0].matcher(searcher, context)
else:
if constantscore:
# To tell the sub-query that score doesn't matter, set weighting
# to None
if context:
context = context.set(weighting=None)
else:
from whoosh.searching import SearchContext
context = SearchContext(weighting=None)
# Or the terms together
m = Or(qs, boost=self.boost).matcher(searcher, context)
return m
class PatternQuery(MultiTerm):
"""An intermediate base class for common methods of Prefix and Wildcard.
"""
__inittypes__ = dict(fieldname=str, text=text_type, boost=float)
def __init__(self, fieldname, text, boost=1.0, constantscore=True):
self.fieldname = fieldname
self.text = text
self.boost = boost
self.constantscore = constantscore
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text and self.boost == other.boost
and self.constantscore == other.constantscore)
def __repr__(self):
r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
if self.boost != 1:
r += ", boost=%s" % self.boost
r += ")"
return r
def __hash__(self):
return (hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
^ hash(self.constantscore))
def _get_pattern(self):
raise NotImplementedError
def _find_prefix(self, text):
# Subclasses/instances should set the SPECIAL_CHARS attribute to a set
# of characters that mark the end of the literal prefix
specialchars = self.SPECIAL_CHARS
i = 0
for i, char in enumerate(text):
if char in specialchars:
break
return text[:i]
def _btexts(self, ixreader):
field = ixreader.schema[self.fieldname]
exp = re.compile(self._get_pattern())
prefix = self._find_prefix(self.text)
if prefix:
candidates = ixreader.expand_prefix(self.fieldname, prefix)
else:
candidates = ixreader.lexicon(self.fieldname)
from_bytes = field.from_bytes
for btext in candidates:
text = from_bytes(btext)
if exp.match(text):
yield btext
class Prefix(PatternQuery):
"""Matches documents that contain any terms that start with the given text.
>>> # Match documents containing words starting with 'comp'
>>> Prefix("content", u"comp")
"""
def __unicode__(self):
return "%s:%s*" % (self.fieldname, self.text)
__str__ = __unicode__
def _btexts(self, ixreader):
return ixreader.expand_prefix(self.fieldname, self.text)
def matcher(self, searcher, context=None):
if self.text == "":
from whoosh.query import Every
eq = Every(self.fieldname, boost=self.boost)
return eq.matcher(searcher, context)
else:
return PatternQuery.matcher(self, searcher, context)
class Wildcard(PatternQuery):
"""Matches documents that contain any terms that match a "glob" pattern.
See the Python ``fnmatch`` module for information about globs.
>>> Wildcard("content", u"in*f?x")
"""
SPECIAL_CHARS = frozenset("*?[")
def __unicode__(self):
return "%s:%s" % (self.fieldname, self.text)
__str__ = __unicode__
def _get_pattern(self):
return fnmatch.translate(self.text)
def normalize(self):
# If there are no wildcard characters in this "wildcard", turn it into
# a simple Term
text = self.text
if text == "*":
from whoosh.query import Every
return Every(self.fieldname, boost=self.boost)
if "*" not in text and "?" not in text:
# If no wildcard chars, convert to a normal term.
return Term(self.fieldname, self.text, boost=self.boost)
elif ("?" not in text and text.endswith("*")
and text.find("*") == len(text) - 1):
# If the only wildcard char is an asterisk at the end, convert to a
# Prefix query.
return Prefix(self.fieldname, self.text[:-1], boost=self.boost)
else:
return self
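# Illustrative normalization outcomes (not part of the original module):
#
#     Wildcard("content", u"*").normalize() -> Every("content")
#     Wildcard("content", u"render").normalize() -> Term("content", u"render")
#     Wildcard("content", u"rend*").normalize() -> Prefix("content", u"rend")
#     Wildcard("content", u"re?der").normalize() -> the Wildcard itself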
def matcher(self, searcher, context=None):
if self.text == "*":
from whoosh.query import Every
eq = Every(self.fieldname, boost=self.boost)
return eq.matcher(searcher, context)
else:
return PatternQuery.matcher(self, searcher, context)
# _btexts() implemented in PatternQuery
class Regex(PatternQuery):
"""Matches documents that contain any terms that match a regular
expression. See the Python ``re`` module for information about regular
expressions.
"""
SPECIAL_CHARS = frozenset("{}()[].?*+^$\\")
def __unicode__(self):
return '%s:r"%s"' % (self.fieldname, self.text)
__str__ = __unicode__
def _get_pattern(self):
return self.text
def _find_prefix(self, text):
if "|" in text:
return ""
if text.startswith("^"):
text = text[1:]
elif text.startswith("\\A"):
text = text[2:]
prefix = PatternQuery._find_prefix(self, text)
lp = len(prefix)
if lp < len(text) and text[lp] in "*?":
# The prefix was cut at the first special character. If that
# character is "*" or "?", the preceding (ordinary) character may
# occur zero times, so it is not guaranteed to appear and must be
# dropped from the literal prefix:
prefix = prefix[:-1]
return prefix
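# Illustrative examples (not part of the original module) of the literal
# prefix this method extracts for the term lookup in _btexts():
#
#     "^render(ing)?" -> "render" (leading "^" stripped, scan stops at "(")
#     "renders?" -> "render" (trailing "s" dropped because "?" makes it optional)
#     "foo|bar" -> "" (alternation defeats any literal prefix)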
def matcher(self, searcher, context=None):
if self.text == ".*":
from whoosh.query import Every
eq = Every(self.fieldname, boost=self.boost)
return eq.matcher(searcher, context)
else:
return PatternQuery.matcher(self, searcher, context)
# _btexts() implemented in PatternQuery
class ExpandingTerm(MultiTerm):
"""Intermediate base class for queries such as FuzzyTerm and Variations
that expand into multiple queries, but come from a single term.
"""
def has_terms(self):
return True
def terms(self, phrases=False):
if self.field():
yield (self.field(), self.text)
class FuzzyTerm(ExpandingTerm):
"""Matches documents containing words similar to the given term.
"""
__inittypes__ = dict(fieldname=str, text=text_type, boost=float,
maxdist=float, prefixlength=int)
def __init__(self, fieldname, text, boost=1.0, maxdist=1,
prefixlength=1, constantscore=True):
"""
:param fieldname: The name of the field to search.
:param text: The text to search for.
:param boost: A boost factor to apply to scores of documents matching
this query.
:param maxdist: The maximum edit distance from the given text.
:param prefixlength: The matched terms must share this many initial
characters with 'text'. For example, if text is "light" and
prefixlength is 2, then only terms starting with "li" are checked
for similarity.
"""
self.fieldname = fieldname
self.text = text
self.boost = boost
self.maxdist = maxdist
self.prefixlength = prefixlength
self.constantscore = constantscore
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text
and self.maxdist == other.maxdist
and self.prefixlength == other.prefixlength
and self.boost == other.boost
and self.constantscore == other.constantscore)
def __repr__(self):
r = "%s(%r, %r, boost=%f, maxdist=%d, prefixlength=%d)"
return r % (self.__class__.__name__, self.fieldname, self.text,
self.boost, self.maxdist, self.prefixlength)
def __unicode__(self):
r = u("%s:%s") % (self.fieldname, self.text) + u("~")
if self.maxdist > 1:
r += u("%d") % self.maxdist
if self.boost != 1.0:
r += u("^%f") % self.boost
return r
__str__ = __unicode__
def __hash__(self):
return (hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
^ hash(self.maxdist) ^ hash(self.prefixlength)
^ hash(self.constantscore))
def _btexts(self, ixreader):
return ixreader.terms_within(self.fieldname, self.text, self.maxdist,
prefix=self.prefixlength)
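# Illustrative usage sketch (not part of the original module): "ix" is assumed
# to be an already-opened whoosh index with a "content" field.
#
#     from whoosh.query import FuzzyTerm
#     q = FuzzyTerm("content", u"wether", maxdist=2, prefixlength=1)
#     with ix.searcher() as s:
#         results = s.search(q)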
class Variations(ExpandingTerm):
"""Query that automatically searches for morphological variations of the
given word in the same field.
"""
def __init__(self, fieldname, text, boost=1.0):
self.fieldname = fieldname
self.text = text
self.boost = boost
def __repr__(self):
r = "%s(%r, %r" % (self.__class__.__name__, self.fieldname, self.text)
if self.boost != 1:
r += ", boost=%s" % self.boost
r += ")"
return r
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.fieldname == other.fieldname
and self.text == other.text and self.boost == other.boost)
def __hash__(self):
return hash(self.fieldname) ^ hash(self.text) ^ hash(self.boost)
def _btexts(self, ixreader):
fieldname = self.fieldname
to_bytes = ixreader.schema[fieldname].to_bytes
for word in variations(self.text):
try:
btext = to_bytes(word)
except ValueError:
continue
if (fieldname, btext) in ixreader:
yield btext
def __unicode__(self):
return u("%s:<%s>") % (self.fieldname, self.text)
__str__ = __unicode__
def replace(self, fieldname, oldtext, newtext):
q = copy.copy(self)
if q.fieldname == fieldname and q.text == oldtext:
q.text = newtext
return q
| bsd-3-clause |
duncanwp/iris | lib/iris/experimental/__init__.py | 17 | 1074 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Experimental code can be introduced to Iris through this package.
Changes to experimental code may be more extensive than in the rest of the
codebase. The code is expected to graduate, eventually, to "full status".
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
| lgpl-3.0 |
veger/ansible | lib/ansible/modules/cloud/azure/azure_rm_containerinstance_facts.py | 33 | 9452 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_containerinstance_facts
version_added: "2.8"
short_description: Get Azure Container Instance facts.
description:
- Get facts of Container Instance.
options:
resource_group:
description:
- The name of the resource group.
required: True
name:
description:
- The name of the container instance.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get specific Container Instance facts
azure_rm_containerinstance_facts:
resource_group: resource_group_name
name: container_group_name
- name: List Container Instances in a specified resource group name
azure_rm_containerinstance_facts:
resource_group: resource_group_name
'''
RETURN = '''
container_groups:
description: A list of Container Instance dictionaries.
returned: always
type: complex
contains:
id:
description:
- The resource id.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/demo/providers/Microsoft.ContainerInstance/containerGroups/my
containers"
resource_group:
description:
- Resource group where the container exists.
returned: always
type: str
sample: testrg
name:
description:
- The resource name.
returned: always
type: str
sample: mycontainers
location:
description:
- The resource location.
returned: always
type: str
sample: westus
os_type:
description:
- The OS type of containers.
returned: always
type: str
sample: linux
ip_address:
description:
- IP address of the container instance.
returned: always
type: str
sample: 173.15.18.1
ports:
description:
- List of ports exposed by the container instance.
returned: always
type: list
sample: [ 80, 81 ]
containers:
description:
- The containers within the container group.
returned: always
type: complex
sample: containers
contains:
name:
description:
- The name of the container instance.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/demo/providers/Microsoft.ContainerInstance/containerGroups/my
containers"
image:
description:
- The container image name.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/demo/providers/Microsoft.ContainerInstance/containerGroups/my
containers"
memory:
description:
- The required memory of the containers in GB.
returned: always
type: float
sample: 1.5
cpu:
description:
- The required number of CPU cores of the containers.
returned: always
type: int
sample: 1
ports:
description:
- List of ports exposed within the container group.
returned: always
type: list
sample: [ 80, 81 ]
tags:
description: Tags assigned to the resource. Dictionary of string:string pairs.
type: dict
sample: { "tag1": "abc" }
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMContainerInstanceFacts(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str'
),
tags=dict(
type='list'
)
)
# store the results of the module operation
self.results = dict(
changed=False,
ansible_facts=dict()
)
self.resource_group = None
self.name = None
super(AzureRMContainerInstanceFacts, self).__init__(self.module_arg_spec, supports_tags=False)
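    # Dispatch note: 'resource_group' is declared required above, so exec_module()
    # either fetches a single container group (when 'name' is supplied) or lists the
    # groups in that resource group; the list_all() branch only applies if the
    # required constraint on 'resource_group' is relaxed.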
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if (self.name is not None):
self.results['containerinstances'] = self.get()
elif (self.resource_group is not None):
self.results['containerinstances'] = self.list_by_resource_group()
else:
self.results['containerinstances'] = self.list_all()
return self.results
def get(self):
response = None
results = []
try:
response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group,
container_group_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for Container Instances.')
if response is not None and self.has_tags(response.tags, self.tags):
results.append(self.format_item(response))
return results
def list_by_resource_group(self):
response = None
results = []
try:
response = self.containerinstance_client.container_groups.list_by_resource_group(resource_group_name=self.resource_group)
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail('Could not list facts for Container Instances.')
if response is not None:
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.format_item(item))
return results
def list_all(self):
response = None
results = []
try:
response = self.containerinstance_client.container_groups.list()
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail('Could not list facts for Container Instances.')
if response is not None:
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
containers = d['containers']
ports = d['ip_address']['ports']
resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]
for port_index in range(len(ports)):
ports[port_index] = ports[port_index]['port']
for container_index in range(len(containers)):
old_container = containers[container_index]
new_container = {
'name': old_container['name'],
'image': old_container['image'],
'memory': old_container['resources']['requests']['memory_in_gb'],
'cpu': old_container['resources']['requests']['cpu'],
'ports': []
}
for port_index in range(len(old_container['ports'])):
new_container['ports'].append(old_container['ports'][port_index]['port'])
containers[container_index] = new_container
d = {
'id': d['id'],
'resource_group': resource_group,
'name': d['name'],
'os_type': d['os_type'],
'ip_address': 'public' if d['ip_address']['type'] == 'Public' else 'none',
'ports': ports,
'location': d['location'],
'containers': containers,
'tags': d.get('tags', None)
}
return d
def main():
AzureRMContainerInstanceFacts()
if __name__ == '__main__':
main()
| gpl-3.0 |
chirilo/remo | vendor-local/lib/python/rest_framework/utils/serializer_helpers.py | 18 | 3752 | from __future__ import unicode_literals
import collections
from rest_framework.compat import OrderedDict, unicode_to_repr
class ReturnDict(OrderedDict):
"""
    Return object from `serializer.data` for the `Serializer` class.
Includes a backlink to the serializer instance for renderers
to use if they need richer field information.
"""
def __init__(self, *args, **kwargs):
self.serializer = kwargs.pop('serializer')
super(ReturnDict, self).__init__(*args, **kwargs)
def copy(self):
return ReturnDict(self, serializer=self.serializer)
def __repr__(self):
return dict.__repr__(self)
def __reduce__(self):
# Pickling these objects will drop the .serializer backlink,
# but preserve the raw data.
return (dict, (dict(self),))
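# Illustrative sketch, not part of the upstream module: because __reduce__ above
# returns a plain dict payload, pickling a ReturnDict round-trips to an ordinary
# dict and drops the serializer backlink ('some_serializer' is hypothetical).
#
#     import pickle
#     data = ReturnDict({'id': 1}, serializer=some_serializer)
#     assert pickle.loads(pickle.dumps(data)) == {'id': 1}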
class ReturnList(list):
"""
    Return object from `serializer.data` for the `ListSerializer` class.
Includes a backlink to the serializer instance for renderers
to use if they need richer field information.
"""
def __init__(self, *args, **kwargs):
self.serializer = kwargs.pop('serializer')
super(ReturnList, self).__init__(*args, **kwargs)
def __repr__(self):
return list.__repr__(self)
def __reduce__(self):
# Pickling these objects will drop the .serializer backlink,
# but preserve the raw data.
return (list, (list(self),))
class BoundField(object):
"""
A field object that also includes `.value` and `.error` properties.
Returned when iterating over a serializer instance,
providing an API similar to Django forms and form fields.
"""
def __init__(self, field, value, errors, prefix=''):
self._field = field
self.value = value
self.errors = errors
self.name = prefix + self.field_name
def __getattr__(self, attr_name):
return getattr(self._field, attr_name)
@property
def _proxy_class(self):
return self._field.__class__
def __repr__(self):
return unicode_to_repr('<%s value=%s errors=%s>' % (
self.__class__.__name__, self.value, self.errors
))
class NestedBoundField(BoundField):
"""
This `BoundField` additionally implements __iter__ and __getitem__
in order to support nested bound fields. This class is the type of
`BoundField` that is used for serializer fields.
"""
def __iter__(self):
for field in self.fields.values():
yield self[field.field_name]
def __getitem__(self, key):
field = self.fields[key]
value = self.value.get(key) if self.value else None
error = self.errors.get(key) if self.errors else None
if hasattr(field, 'fields'):
return NestedBoundField(field, value, error, prefix=self.name + '.')
return BoundField(field, value, error, prefix=self.name + '.')
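# Illustrative sketch of the intended consumption pattern (UserSerializer and
# user are hypothetical, not defined in this module): iterating a serializer
# yields BoundField / NestedBoundField objects exposing `.name`, `.value` and
# `.errors`, much like Django form fields.
#
#     serializer = UserSerializer(user)
#     for field in serializer:
#         print(field.name, field.value, field.errors)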
class BindingDict(collections.MutableMapping):
"""
This dict-like object is used to store fields on a serializer.
This ensures that whenever fields are added to the serializer we call
`field.bind()` so that the `field_name` and `parent` attributes
can be set correctly.
"""
def __init__(self, serializer):
self.serializer = serializer
self.fields = OrderedDict()
def __setitem__(self, key, field):
self.fields[key] = field
field.bind(field_name=key, parent=self.serializer)
def __getitem__(self, key):
return self.fields[key]
def __delitem__(self, key):
del self.fields[key]
def __iter__(self):
return iter(self.fields)
def __len__(self):
return len(self.fields)
def __repr__(self):
return dict.__repr__(self.fields)
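# Illustrative sketch (hypothetical names): assigning into a BindingDict binds
# the field immediately, so `field_name` and `parent` are populated on insertion.
#
#     fields = BindingDict(serializer)
#     fields['email'] = serializers.EmailField()
#     assert fields['email'].field_name == 'email'
#     assert fields['email'].parent is serializer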
| bsd-3-clause |
krdlab/ansible-modules-core | cloud/amazon/ec2_key.py | 51 | 7841 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_key
version_added: "1.5"
short_description: maintain an ec2 key pair.
description:
- maintains ec2 key pairs. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the key pair.
required: true
key_material:
description:
- Public key material.
required: false
state:
description:
- create or delete keypair
required: false
default: 'present'
aliases: []
wait:
description:
- Wait for the specified action to complete before returning.
required: false
default: false
aliases: []
version_added: "1.6"
wait_timeout:
description:
- How long before wait gives up, in seconds
required: false
default: 300
aliases: []
version_added: "1.6"
extends_documentation_fragment:
- aws
- ec2
author: "Vincent Viallet (@zbal)"
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Creates a new ec2 key pair named `example` if not present, returns generated
# private key
- name: example ec2 key
ec2_key:
name: example
# Creates a new ec2 key pair named `example2` if not present using provided key
# material. This could use the 'file' lookup plugin to pull this off disk.
- name: example2 ec2 key
ec2_key:
name: example2
key_material: 'ssh-rsa AAAAxyz...== [email protected]'
state: present
# Creates a new ec2 key pair named `example3` if not present using provided key
# material
- name: example3 ec2 key
ec2_key:
name: example3
key_material: "{{ item }}"
with_file: /path/to/public_key.id_rsa.pub
# Removes ec2 key pair by name
- name: remove example key
ec2_key:
name: example
state: absent
'''
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import random
import string
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
key_material=dict(required=False),
state = dict(default='present', choices=['present', 'absent']),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
name = module.params['name']
state = module.params.get('state')
key_material = module.params.get('key_material')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
ec2 = ec2_connect(module)
# find the key if present
key = ec2.get_key_pair(name)
# Ensure requested key is absent
if state == 'absent':
if key:
'''found a match, delete it'''
if not module.check_mode:
try:
key.delete()
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if not ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be removed")
except Exception, e:
module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
key = None
changed = True
# Ensure requested key is present
elif state == 'present':
if key:
# existing key found
if key_material:
# EC2's fingerprints are non-trivial to generate, so push this key
# to a temporary name and make ec2 calculate the fingerprint for us.
#
# http://blog.jbrowne.com/?p=23
# https://forums.aws.amazon.com/thread.jspa?messageID=352828
# find an unused name
test = 'empty'
while test:
randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)]
tmpkeyname = "ansible-" + ''.join(randomchars)
test = ec2.get_key_pair(tmpkeyname)
# create tmp key
tmpkey = ec2.import_key_pair(tmpkeyname, key_material)
# get tmp key fingerprint
tmpfingerprint = tmpkey.fingerprint
# delete tmp key
tmpkey.delete()
if key.fingerprint != tmpfingerprint:
if not module.check_mode:
key.delete()
key = ec2.import_key_pair(name, key_material)
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be re-created")
changed = True
pass
# if the key doesn't exist, create it now
else:
'''no match found, create it'''
if not module.check_mode:
if key_material:
'''We are providing the key, need to import'''
key = ec2.import_key_pair(name, key_material)
else:
'''
No material provided, let AWS handle the key creation and
retrieve the private key
'''
key = ec2.create_key_pair(name)
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be created")
changed = True
if key:
data = {
'name': key.name,
'fingerprint': key.fingerprint
}
if key.material:
data.update({'private_key': key.material})
module.exit_json(changed=changed, key=data)
else:
module.exit_json(changed=changed, key=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
yx91490/pgcli | pgcli/packages/counter.py | 20 | 6273 | #copied from http://code.activestate.com/recipes/576611-counter-class/
from operator import itemgetter
from heapq import nlargest
from itertools import repeat, ifilter
class Counter(dict):
'''Dict subclass for counting hashable objects. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> Counter('zyzygy')
Counter({'y': 3, 'z': 2, 'g': 1})
'''
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
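    # A missing element counts as zero, so callers can write `c[elem] += 1`
    # without pre-initialising the key.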
def __missing__(self, key):
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
if n is None:
return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
return nlargest(n, self.iteritems(), key=itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in self.iteritems():
for _ in repeat(None, count):
yield elem
# Override dict methods where the meaning changes for Counter objects.
@classmethod
def fromkeys(cls, iterable, v=None):
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
if iterable is not None:
if hasattr(iterable, 'iteritems'):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
dict.update(self, iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def copy(self):
'Like dict.copy() but returns a Counter instance instead of a dict.'
return Counter(self)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
dict.__delitem__(self, elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] + other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] - other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_max = max
result = Counter()
for elem in set(self) | set(other):
newcount = _max(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_min = min
result = Counter()
if len(self) < len(other):
self, other = other, self
for elem in ifilter(self.__contains__, other):
newcount = _min(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
| bsd-3-clause |
popazerty/enigma2-4.3 | lib/python/Components/UsageConfig.py | 2 | 55113 | import os
from time import time
from enigma import eDVBDB, eEPGCache, setTunerTypePriorityOrder, setPreferredTuner, setSpinnerOnOff, setEnableTtCachingOnOff, eEnv, Misc_Options, eBackgroundFileEraser, eServiceEvent
from Components.About import about
from Components.Harddisk import harddiskmanager
from config import ConfigSubsection, ConfigYesNo, config, ConfigSelection, ConfigText, ConfigNumber, ConfigSet, ConfigLocations, NoSave, ConfigClock, ConfigInteger, ConfigBoolean, ConfigPassword, ConfigIP, ConfigSlider, ConfigSelectionNumber
from Tools.Directories import resolveFilename, SCOPE_HDD, SCOPE_TIMESHIFT, SCOPE_AUTORECORD, SCOPE_SYSETC, defaultRecordingLocation, fileExists
from boxbranding import getBoxType, getMachineBuild, getMachineName, getBrandOEM
from Components.NimManager import nimmanager
from Components.ServiceList import refreshServiceList
from SystemInfo import SystemInfo
from Tools.HardwareInfo import HardwareInfo
def InitUsageConfig():
config.misc.useNTPminutes = ConfigSelection(default = "30", choices = [("30", "30" + " " +_("minutes")), ("60", _("Hour")), ("1440", _("Once per day"))])
if getBrandOEM() == 'vuplus':
config.misc.remotecontrol_text_support = ConfigYesNo(default = True)
else:
config.misc.remotecontrol_text_support = ConfigYesNo(default = False)
config.workaround = ConfigSubsection()
config.workaround.deeprecord = ConfigYesNo(default = False)
config.usage = ConfigSubsection()
config.usage.shutdownOK = ConfigBoolean(default = True)
config.usage.shutdownNOK_action = ConfigSelection(default = "normal", choices = [("normal", _("just boot")), ("standby", _("goto standby")), ("deepstandby", _("goto deep-standby"))])
config.usage.boot_action = ConfigSelection(default = "normal", choices = [("normal", _("just boot")), ("standby", _("goto standby"))])
config.usage.showdish = ConfigSelection(default = "flashing", choices = [("flashing", _("Flashing")), ("normal", _("Not Flashing")), ("off", _("Off"))])
config.usage.multibouquet = ConfigYesNo(default = True)
config.usage.alternative_number_mode = ConfigYesNo(default = False)
def alternativeNumberModeChange(configElement):
eDVBDB.getInstance().setNumberingMode(configElement.value)
refreshServiceList()
config.usage.alternative_number_mode.addNotifier(alternativeNumberModeChange)
config.usage.crypto_icon_mode = ConfigSelection(default = "0", choices = [("0", _("None")), ("1", _("Left from servicename")), ("2", _("Right from servicename"))])
config.usage.crypto_icon_mode.addNotifier(refreshServiceList)
config.usage.panicbutton = ConfigYesNo(default = False)
config.usage.servicetype_icon_mode = ConfigSelection(default = "0", choices = [("0", _("None")), ("1", _("Left from servicename")), ("2", _("Right from servicename"))])
config.usage.servicetype_icon_mode.addNotifier(refreshServiceList)
choicelist = [("-1", _("Divide")), ("0", _("Disable"))]
for i in range(100,1300,100):
choicelist.append(("%d" % i, ngettext("%d pixel wide", "%d pixels wide", i) % i))
config.usage.servicelist_column = ConfigSelection(default="0", choices=choicelist)
config.usage.servicelist_column.addNotifier(refreshServiceList)
config.usage.service_icon_enable = ConfigYesNo(default = False)
config.usage.service_icon_enable.addNotifier(refreshServiceList)
config.usage.servicelist_cursor_behavior = ConfigSelection(default = "keep", choices = [
("standard", _("Standard")),
("keep", _("Keep service")),
("reverseB", _("Reverse bouquet buttons")),
("keep reverseB", _("Keep service") + " + " + _("Reverse bouquet buttons"))])
config.usage.servicelist_keep_service = ConfigYesNo(default = True)
config.usage.multiepg_ask_bouquet = ConfigYesNo(default = False)
config.usage.showpicon = ConfigYesNo(default = True)
config.usage.show_dvdplayer = ConfigYesNo(default = False)
config.usage.quickzap_bouquet_change = ConfigYesNo(default = False)
config.usage.e1like_radio_mode = ConfigYesNo(default = True)
choicelist = []
for i in range(1, 11):
choicelist.append(("%d" % i, ngettext("%d second", "%d seconds", i) % i))
config.usage.infobar_timeout = ConfigSelection(default = "5", choices = [("0", _("No timeout"))] + choicelist)
config.usage.show_infobar_on_zap = ConfigYesNo(default = True)
config.usage.show_infobar_on_skip = ConfigYesNo(default = True)
config.usage.show_infobar_on_event_change = ConfigYesNo(default = False)
config.usage.show_infobar_channel_number = ConfigYesNo(default = False)
config.usage.show_infobar_lite = ConfigYesNo(default = False)
config.usage.show_second_infobar = ConfigSelection(default = "2", choices = [("0", _("Off")), ("1", _("Event Info")), ("2", _("2nd Infobar INFO"))])
config.usage.second_infobar_timeout = ConfigSelection(default = "0", choices = [("0", _("No timeout"))] + choicelist)
def showsecondinfobarChanged(configElement):
if config.usage.show_second_infobar.value != "INFOBAREPG":
SystemInfo["InfoBarEpg"] = True
else:
SystemInfo["InfoBarEpg"] = False
config.usage.show_second_infobar.addNotifier(showsecondinfobarChanged, immediate_feedback = True)
config.usage.infobar_frontend_source = ConfigSelection(default = "tuner", choices = [("settings", _("LameDB")), ("tuner", _("Tuner"))])
config.usage.show_picon_bkgrn = ConfigSelection(default = "transparent", choices = [("none", _("Disabled")), ("transparent", _("Transparent")), ("blue", _("Blue")), ("red", _("Red")), ("black", _("Black")), ("white", _("White")), ("lightgrey", _("Light Grey")), ("grey", _("Grey"))])
config.usage.show_spinner = ConfigYesNo(default = True)
config.usage.enable_tt_caching = ConfigYesNo(default = True)
config.usage.sort_settings = ConfigYesNo(default = False)
config.usage.sort_menus = ConfigYesNo(default = False)
config.usage.sort_pluginlist = ConfigYesNo(default = True)
config.usage.sort_extensionslist = ConfigYesNo(default = False)
config.usage.movieplayer_pvrstate = ConfigYesNo(default = False)
choicelist = []
for i in (10, 30):
choicelist.append(("%d" % i, ngettext("%d second", "%d seconds", i) % i))
for i in (60, 120, 300, 600, 1200, 1800):
m = i / 60
choicelist.append(("%d" % i, ngettext("%d minute", "%d minutes", m) % m))
for i in (3600, 7200, 14400):
h = i / 3600
choicelist.append(("%d" % i, ngettext("%d hour", "%d hours", h) % h))
config.usage.hdd_standby = ConfigSelection(default = "60", choices = [("0", _("No standby"))] + choicelist)
config.usage.output_12V = ConfigSelection(default = "do not change", choices = [
("do not change", _("Do not change")), ("off", _("Off")), ("on", _("On")) ])
config.usage.pip_zero_button = ConfigSelection(default = "swapstop", choices = [
("standard", _("Standard")), ("swap", _("Swap PiP and main picture")),
("swapstop", _("Move PiP to main picture")), ("stop", _("Stop PiP")) ])
config.usage.pip_hideOnExit = ConfigSelection(default = "without popup", choices = [
("no", _("No")), ("popup", _("With popup")), ("without popup", _("Without popup")) ])
if not os.path.exists(resolveFilename(SCOPE_HDD)):
try:
os.mkdir(resolveFilename(SCOPE_HDD),0755)
except:
pass
config.usage.default_path = ConfigText(default = resolveFilename(SCOPE_HDD))
if not config.usage.default_path.value.endswith('/'):
tmpvalue = config.usage.default_path.value
config.usage.default_path.setValue(tmpvalue + '/')
config.usage.default_path.save()
def defaultpathChanged(configElement):
tmpvalue = config.usage.default_path.value
try:
if not os.path.exists(tmpvalue):
os.system("mkdir -p %s" %tmpvalue)
except:
print "Failed to create recording path: %s" %tmpvalue
if not config.usage.default_path.value.endswith('/'):
config.usage.default_path.setValue(tmpvalue + '/')
config.usage.default_path.save()
config.usage.default_path.addNotifier(defaultpathChanged, immediate_feedback = False)
config.usage.timer_path = ConfigText(default = "<default>")
config.usage.autorecord_path = ConfigText(default = "<default>")
config.usage.instantrec_path = ConfigText(default = "<default>")
if not os.path.exists(resolveFilename(SCOPE_TIMESHIFT)):
try:
os.mkdir(resolveFilename(SCOPE_TIMESHIFT),0755)
except:
pass
config.usage.timeshift_path = ConfigText(default = resolveFilename(SCOPE_TIMESHIFT))
	if not config.usage.timeshift_path.value.endswith('/'):
tmpvalue = config.usage.timeshift_path.value
config.usage.timeshift_path.setValue(tmpvalue + '/')
config.usage.timeshift_path.save()
def timeshiftpathChanged(configElement):
if not config.usage.timeshift_path.value.endswith('/'):
tmpvalue = config.usage.timeshift_path.value
config.usage.timeshift_path.setValue(tmpvalue + '/')
config.usage.timeshift_path.save()
config.usage.timeshift_path.addNotifier(timeshiftpathChanged, immediate_feedback = False)
config.usage.allowed_timeshift_paths = ConfigLocations(default = [resolveFilename(SCOPE_TIMESHIFT)])
if not os.path.exists(resolveFilename(SCOPE_AUTORECORD)):
try:
os.mkdir(resolveFilename(SCOPE_AUTORECORD),0755)
except:
pass
config.usage.autorecord_path = ConfigText(default = resolveFilename(SCOPE_AUTORECORD))
	if not config.usage.autorecord_path.value.endswith('/'):
tmpvalue = config.usage.autorecord_path.value
config.usage.autorecord_path.setValue(tmpvalue + '/')
config.usage.autorecord_path.save()
def autorecordpathChanged(configElement):
if not config.usage.autorecord_path.value.endswith('/'):
tmpvalue = config.usage.autorecord_path.value
config.usage.autorecord_path.setValue(tmpvalue + '/')
config.usage.autorecord_path.save()
config.usage.autorecord_path.addNotifier(autorecordpathChanged, immediate_feedback = False)
config.usage.allowed_autorecord_paths = ConfigLocations(default = [resolveFilename(SCOPE_AUTORECORD)])
config.usage.movielist_trashcan = ConfigYesNo(default=True)
	config.usage.movielist_trashcan_network_clean = ConfigYesNo(default=False)
	config.usage.movielist_trashcan_days = ConfigSelectionNumber(min = 1, max = 31, stepwidth = 1, default = 8, wraparound = True)
config.usage.movielist_trashcan_reserve = ConfigNumber(default = 40)
config.usage.on_movie_start = ConfigSelection(default = "ask", choices = [
("ask", _("Ask user")), ("resume", _("Resume from last position")), ("beginning", _("Start from the beginning")) ])
config.usage.on_movie_stop = ConfigSelection(default = "movielist", choices = [
("ask", _("Ask user")), ("movielist", _("Return to movie list")), ("quit", _("Return to previous service")) ])
config.usage.on_movie_eof = ConfigSelection(default = "movielist", choices = [
("ask", _("Ask user")), ("movielist", _("Return to movie list")), ("quit", _("Return to previous service")), ("pause", _("Pause movie at end")), ("playlist", _("Play next (return to movie list)")),
("playlistquit", _("Play next (return to previous service)")), ("loop", _("Continues play (loop)")), ("repeatcurrent", _("Repeat"))])
config.usage.next_movie_msg = ConfigYesNo(default = True)
config.usage.leave_movieplayer_onExit = ConfigSelection(default = "no", choices = [
("no", _("No")), ("popup", _("With popup")), ("without popup", _("Without popup")) ])
config.usage.setup_level = ConfigSelection(default = "expert", choices = [
("simple", _("Simple")),
("intermediate", _("Intermediate")),
("expert", _("Expert")) ])
config.usage.on_long_powerpress = ConfigSelection(default = "show_menu", choices = [
("show_menu", _("Show shutdown menu")),
("shutdown", _("Immediate shutdown")),
("standby", _("Standby")) ] )
config.usage.on_short_powerpress = ConfigSelection(default = "standby", choices = [
("show_menu", _("Show shutdown menu")),
("shutdown", _("Immediate shutdown")),
("standby", _("Standby")) ] )
choicelist = [("0", "Disabled")]
for i in (5, 30, 60, 300, 600, 900, 1200, 1800, 2700, 3600):
if i < 60:
m = ngettext("%d second", "%d seconds", i) % i
else:
m = abs(i / 60)
m = ngettext("%d minute", "%d minutes", m) % m
choicelist.append(("%d" % i, m))
config.usage.screen_saver = ConfigSelection(default = "0", choices = choicelist)
config.usage.check_timeshift = ConfigYesNo(default = True)
config.usage.alternatives_priority = ConfigSelection(default = "0", choices = [
("0", "DVB-S/-C/-T"),
("1", "DVB-S/-T/-C"),
("2", "DVB-C/-S/-T"),
("3", "DVB-C/-T/-S"),
("4", "DVB-T/-C/-S"),
("5", "DVB-T/-S/-C") ])
nims = [("-1", _("auto"))]
rec_nims = [("-2", _("Disabled")), ("-1", _("auto"))]
for x in nimmanager.nim_slots:
nims.append((str(x.slot), x.getSlotName()))
rec_nims.append((str(x.slot), x.getSlotName()))
config.usage.frontend_priority = ConfigSelection(default = "-1", choices = nims)
config.usage.recording_frontend_priority = ConfigSelection(default = "-2", choices = rec_nims)
config.misc.disable_background_scan = ConfigYesNo(default = False)
config.usage.jobtaksextensions = ConfigYesNo(default = True)
config.usage.servicenum_fontsize = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.usage.servicename_fontsize = ConfigSelectionNumber(default = 2, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.usage.serviceinfo_fontsize = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.usage.serviceitems_per_page = ConfigSelectionNumber(default = 20, stepwidth = 1, min = 3, max = 40, wraparound = True)
config.usage.show_servicelist = ConfigYesNo(default = True)
config.usage.servicelist_mode = ConfigSelection(default = "standard", choices = [
("standard", _("Standard")),
("simple", _("Simple")) ] )
config.usage.servicelistpreview_mode = ConfigYesNo(default = False)
config.usage.tvradiobutton_mode = ConfigSelection(default="BouquetList", choices = [
("ChannelList", _("Channel List")),
("BouquetList", _("Bouquet List")),
("MovieList", _("Movie List"))])
config.usage.channelbutton_mode = ConfigSelection(default="0", choices = [
("0", _("Just change channels")),
("1", _("Channel List")),
("2", _("Bouquet List"))])
config.usage.updownbutton_mode = ConfigSelection(default="1", choices = [
("0", _("Just change channels")),
("1", _("Channel List")),
("2", _("Just change channels revert"))])
config.usage.leftrightbutton_mode = ConfigSelection(default="0", choices = [
("0", _("Just change channels")),
("1", _("Channel List"))])
config.usage.okbutton_mode = ConfigSelection(default="0", choices = [
("0", _("InfoBar")),
("1", _("Channel List"))])
config.usage.show_bouquetalways = ConfigYesNo(default = False)
config.usage.show_event_progress_in_servicelist = ConfigSelection(default = 'barright', choices = [
('barleft', _("Progress bar left")),
('barright', _("Progress bar right")),
('percleft', _("Percentage left")),
('percright', _("Percentage right")),
('no', _("No")) ])
config.usage.show_channel_numbers_in_servicelist = ConfigYesNo(default = True)
config.usage.show_channel_jump_in_servicelist = ConfigSelection(default="alpha", choices = [
("quick", _("Quick Actions")),
("alpha", _("Alpha")),
("number", _("Number"))])
config.usage.show_event_progress_in_servicelist.addNotifier(refreshServiceList)
config.usage.show_channel_numbers_in_servicelist.addNotifier(refreshServiceList)
config.usage.blinking_display_clock_during_recording = ConfigYesNo(default = False)
config.usage.blinking_rec_symbol_during_recording = ConfigYesNo(default = False)
config.usage.show_message_when_recording_starts = ConfigYesNo(default = True)
config.usage.load_length_of_movies_in_moviellist = ConfigYesNo(default = True)
config.usage.show_icons_in_movielist = ConfigSelection(default = 'i', choices = [
('o', _("Off")),
('p', _("Progress")),
('s', _("Small progress")),
('i', _("Icons")),
])
config.usage.movielist_unseen = ConfigYesNo(default = True)
config.usage.swap_snr_on_osd = ConfigYesNo(default = False)
config.usage.swap_time_display_on_osd = ConfigSelection(default = "0", choices = [("0", _("Skin Setting")), ("1", _("Mins")), ("2", _("Mins Secs")), ("3", _("Hours Mins")), ("4", _("Hours Mins Secs")), ("5", _("Percentage"))])
config.usage.swap_media_time_display_on_osd = ConfigSelection(default = "0", choices = [("0", _("Skin Setting")), ("1", _("Mins")), ("2", _("Mins Secs")), ("3", _("Hours Mins")), ("4", _("Hours Mins Secs")), ("5", _("Percentage"))])
config.usage.swap_time_remaining_on_osd = ConfigSelection(default = "0", choices = [("0", _("Remaining")), ("1", _("Elapsed")), ("2", _("Elapsed & Remaining")), ("3", _("Remaining & Elapsed"))])
config.usage.elapsed_time_positive_osd = ConfigYesNo(default = False)
config.usage.swap_time_display_on_vfd = ConfigSelection(default = "0", choices = [("0", _("Skin Setting")), ("1", _("Mins")), ("2", _("Mins Secs")), ("3", _("Hours Mins")), ("4", _("Hours Mins Secs")), ("5", _("Percentage"))])
config.usage.swap_media_time_display_on_vfd = ConfigSelection(default = "0", choices = [("0", _("Skin Setting")), ("1", _("Mins")), ("2", _("Mins Secs")), ("3", _("Hours Mins")), ("4", _("Hours Mins Secs")), ("5", _("Percentage"))])
config.usage.swap_time_remaining_on_vfd = ConfigSelection(default = "0", choices = [("0", _("Remaining")), ("1", _("Elapsed")), ("2", _("Elapsed & Remaining")), ("3", _("Remaining & Elapsed"))])
config.usage.elapsed_time_positive_vfd = ConfigYesNo(default = False)
config.usage.lcd_scroll_delay = ConfigSelection(default = "10000", choices = [
("10000", "10 " + _("seconds")),
("20000", "20 " + _("seconds")),
("30000", "30 " + _("seconds")),
("60000", "1 " + _("minute")),
("300000", "5 " + _("minutes")),
("noscrolling", _("off"))])
config.usage.lcd_scroll_speed = ConfigSelection(default = "300", choices = [
("500", _("slow")),
("300", _("normal")),
("100", _("fast"))])
def SpinnerOnOffChanged(configElement):
setSpinnerOnOff(int(configElement.value))
config.usage.show_spinner.addNotifier(SpinnerOnOffChanged)
def EnableTtCachingChanged(configElement):
setEnableTtCachingOnOff(int(configElement.value))
config.usage.enable_tt_caching.addNotifier(EnableTtCachingChanged)
def TunerTypePriorityOrderChanged(configElement):
setTunerTypePriorityOrder(int(configElement.value))
config.usage.alternatives_priority.addNotifier(TunerTypePriorityOrderChanged, immediate_feedback=False)
def PreferredTunerChanged(configElement):
setPreferredTuner(int(configElement.value))
config.usage.frontend_priority.addNotifier(PreferredTunerChanged)
config.usage.hide_zap_errors = ConfigYesNo(default = True)
config.usage.hide_ci_messages = ConfigYesNo(default = True)
config.usage.show_cryptoinfo = ConfigSelection([("0", _("Off")),("1", _("One line")),("2", _("Two lines"))], "2")
config.usage.show_eit_nownext = ConfigYesNo(default = True)
config.usage.show_vcr_scart = ConfigYesNo(default = False)
config.epg = ConfigSubsection()
config.epg.eit = ConfigYesNo(default = True)
config.epg.mhw = ConfigYesNo(default = False)
config.epg.freesat = ConfigYesNo(default = True)
config.epg.viasat = ConfigYesNo(default = True)
config.epg.netmed = ConfigYesNo(default = True)
config.epg.virgin = ConfigYesNo(default = False)
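	# The notifier below rebuilds the eEPGCache source bitmask from the yes/no
	# switches above: every source flag starts enabled and is masked out when its
	# switch is off (e.g. disabling config.epg.mhw clears the eEPGCache.MHW bit).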
def EpgSettingsChanged(configElement):
from enigma import eEPGCache
mask = 0xffffffff
if not config.epg.eit.value:
mask &= ~(eEPGCache.NOWNEXT | eEPGCache.SCHEDULE | eEPGCache.SCHEDULE_OTHER)
if not config.epg.mhw.value:
mask &= ~eEPGCache.MHW
if not config.epg.freesat.value:
mask &= ~(eEPGCache.FREESAT_NOWNEXT | eEPGCache.FREESAT_SCHEDULE | eEPGCache.FREESAT_SCHEDULE_OTHER)
if not config.epg.viasat.value:
mask &= ~eEPGCache.VIASAT
if not config.epg.netmed.value:
mask &= ~(eEPGCache.NETMED_SCHEDULE | eEPGCache.NETMED_SCHEDULE_OTHER)
if not config.epg.virgin.value:
mask &= ~(eEPGCache.VIRGIN_NOWNEXT | eEPGCache.VIRGIN_SCHEDULE)
eEPGCache.getInstance().setEpgSources(mask)
config.epg.eit.addNotifier(EpgSettingsChanged)
config.epg.mhw.addNotifier(EpgSettingsChanged)
config.epg.freesat.addNotifier(EpgSettingsChanged)
config.epg.viasat.addNotifier(EpgSettingsChanged)
config.epg.netmed.addNotifier(EpgSettingsChanged)
config.epg.virgin.addNotifier(EpgSettingsChanged)
config.epg.histminutes = ConfigSelectionNumber(min = 0, max = 120, stepwidth = 15, default = 0, wraparound = True)
def EpgHistorySecondsChanged(configElement):
eEPGCache.getInstance().setEpgHistorySeconds(config.epg.histminutes.value*60)
config.epg.histminutes.addNotifier(EpgHistorySecondsChanged)
config.epg.cacheloadsched = ConfigYesNo(default = False)
config.epg.cachesavesched = ConfigYesNo(default = False)
def EpgCacheLoadSchedChanged(configElement):
import EpgLoadSave
EpgLoadSave.EpgCacheLoadCheck()
def EpgCacheSaveSchedChanged(configElement):
import EpgLoadSave
EpgLoadSave.EpgCacheSaveCheck()
config.epg.cacheloadsched.addNotifier(EpgCacheLoadSchedChanged, immediate_feedback = False)
config.epg.cachesavesched.addNotifier(EpgCacheSaveSchedChanged, immediate_feedback = False)
config.epg.cacheloadtimer = ConfigSelectionNumber(default = 24, stepwidth = 1, min = 1, max = 24, wraparound = True)
config.epg.cachesavetimer = ConfigSelectionNumber(default = 24, stepwidth = 1, min = 1, max = 24, wraparound = True)
config.osd.dst_left = ConfigSelectionNumber(default = 0, stepwidth = 1, min = 0, max = 720, wraparound = False)
config.osd.dst_width = ConfigSelectionNumber(default = 720, stepwidth = 1, min = 0, max = 720, wraparound = False)
config.osd.dst_top = ConfigSelectionNumber(default = 0, stepwidth = 1, min = 0, max = 576, wraparound = False)
config.osd.dst_height = ConfigSelectionNumber(default = 576, stepwidth = 1, min = 0, max = 576, wraparound = False)
config.osd.alpha = ConfigSelectionNumber(default = 255, stepwidth = 1, min = 0, max = 255, wraparound = False)
config.osd.alpha_teletext = ConfigSelectionNumber(default = 255, stepwidth = 1, min = 0, max = 255, wraparound = False)
config.av.osd_alpha = NoSave(ConfigNumber(default = 255))
config.osd.threeDmode = ConfigSelection([("off", _("Off")), ("auto", _("Auto")), ("sidebyside", _("Side by Side")),("topandbottom", _("Top and Bottom"))], "auto")
config.osd.threeDznorm = ConfigSlider(default = 50, increment = 1, limits = (0, 100))
config.osd.show3dextensions = ConfigYesNo(default = False)
choiceoptions = [("mode1", _("Mode 1")), ("mode2", _("Mode 2"))]
config.osd.threeDsetmode = ConfigSelection(default = 'mode1' , choices = choiceoptions )
hddchoises = [('/etc/enigma2/', 'Internal Flash')]
for p in harddiskmanager.getMountedPartitions():
if os.path.exists(p.mountpoint):
d = os.path.normpath(p.mountpoint)
if p.mountpoint != '/':
hddchoises.append((p.mountpoint, d))
config.misc.epgcachepath = ConfigSelection(default = '/etc/enigma2/', choices = hddchoises)
config.misc.epgcachefilename = ConfigText(default='epg', fixed_size=False)
config.misc.epgcache_filename = ConfigText(default = (config.misc.epgcachepath.value + config.misc.epgcachefilename.value.replace('.dat','') + '.dat'))
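	# The notifier below recomposes the cache file location from the selected path
	# and file name, points eEPGCache at the new file, forces a save, and removes a
	# stale copy from /etc/enigma2 once the cache lives elsewhere.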
def EpgCacheChanged(configElement):
config.misc.epgcache_filename.setValue(os.path.join(config.misc.epgcachepath.value, config.misc.epgcachefilename.value.replace('.dat','') + '.dat'))
config.misc.epgcache_filename.save()
eEPGCache.getInstance().setCacheFile(config.misc.epgcache_filename.value)
epgcache = eEPGCache.getInstance()
epgcache.save()
if not config.misc.epgcache_filename.value.startswith("/etc/enigma2/"):
if os.path.exists('/etc/enigma2/' + config.misc.epgcachefilename.value.replace('.dat','') + '.dat'):
os.remove('/etc/enigma2/' + config.misc.epgcachefilename.value.replace('.dat','') + '.dat')
config.misc.epgcachepath.addNotifier(EpgCacheChanged, immediate_feedback = False)
config.misc.epgcachefilename.addNotifier(EpgCacheChanged, immediate_feedback = False)
config.misc.showradiopic = ConfigYesNo(default = True)
config.misc.bootvideo = ConfigYesNo(default = False)
def setHDDStandby(configElement):
for hdd in harddiskmanager.HDDList():
hdd[1].setIdleTime(int(configElement.value))
config.usage.hdd_standby.addNotifier(setHDDStandby, immediate_feedback=False)
if SystemInfo["12V_Output"]:
def set12VOutput(configElement):
Misc_Options.getInstance().set_12V_output(configElement.value == "on" and 1 or 0)
config.usage.output_12V.addNotifier(set12VOutput, immediate_feedback=False)
config.usage.keymap = ConfigText(default = eEnv.resolve("${datadir}/enigma2/keymap.xml"))
if getMachineName().lower().startswith('xp') or getMachineName().lower().startswith('lx') or getBoxType().startswith('atemio'):
if fileExists(eEnv.resolve("${datadir}/enigma2/keymap.usr")):
config.usage.keymap = ConfigSelection(default = eEnv.resolve("${datadir}/enigma2/keymap.xpe"), choices = [
(eEnv.resolve("${datadir}/enigma2/keymap.xml"), _("Default keymap - keymap.xml")),
(eEnv.resolve("${datadir}/enigma2/keymap.xpe"), _("Xpeed keymap - keymap.xpe")),
(eEnv.resolve("${datadir}/enigma2/keymap.usr"), _("User keymap - keymap.usr")),
(eEnv.resolve("${datadir}/enigma2/keymap.ntr"), _("Neutrino keymap - keymap.ntr")),
(eEnv.resolve("${datadir}/enigma2/keymap.u80"), _("U80 keymap - keymap.u80"))])
else:
config.usage.keymap = ConfigSelection(default = eEnv.resolve("${datadir}/enigma2/keymap.xpe"), choices = [
(eEnv.resolve("${datadir}/enigma2/keymap.xml"), _("Default keymap - keymap.xml")),
(eEnv.resolve("${datadir}/enigma2/keymap.xpe"), _("Xpeed keymap - keymap.xpe")),
(eEnv.resolve("${datadir}/enigma2/keymap.ntr"), _("Neutrino keymap - keymap.ntr")),
(eEnv.resolve("${datadir}/enigma2/keymap.u80"), _("U80 keymap - keymap.u80"))])
else:
if fileExists(eEnv.resolve("${datadir}/enigma2/keymap.usr")):
config.usage.keymap = ConfigSelection(default = eEnv.resolve("${datadir}/enigma2/keymap.xml"), choices = [
(eEnv.resolve("${datadir}/enigma2/keymap.xml"), _("Default keymap - keymap.xml")),
(eEnv.resolve("${datadir}/enigma2/keymap.usr"), _("User keymap - keymap.usr")),
(eEnv.resolve("${datadir}/enigma2/keymap.ntr"), _("Neutrino keymap - keymap.ntr")),
(eEnv.resolve("${datadir}/enigma2/keymap.u80"), _("U80 keymap - keymap.u80"))])
else:
config.usage.keymap = ConfigSelection(default = eEnv.resolve("${datadir}/enigma2/keymap.xml"), choices = [
(eEnv.resolve("${datadir}/enigma2/keymap.xml"), _("Default keymap - keymap.xml")),
(eEnv.resolve("${datadir}/enigma2/keymap.ntr"), _("Neutrino keymap - keymap.ntr")),
(eEnv.resolve("${datadir}/enigma2/keymap.u80"), _("U80 keymap - keymap.u80"))])
config.network = ConfigSubsection()
if SystemInfo["WakeOnLAN"]:
def wakeOnLANChanged(configElement):
if getBoxType() in ('et10000', 'gbquadplus', 'gbquad', 'gb800ueplus', 'gb800seplus', 'gbultraue', 'gbultrase', 'gbipbox', 'quadbox2400', 'mutant2400'):
open(SystemInfo["WakeOnLAN"], "w").write(configElement.value and "on" or "off")
else:
open(SystemInfo["WakeOnLAN"], "w").write(configElement.value and "enable" or "disable")
config.network.wol = ConfigYesNo(default = False)
config.network.wol.addNotifier(wakeOnLANChanged)
config.network.AFP_autostart = ConfigYesNo(default = False)
config.network.NFS_autostart = ConfigYesNo(default = False)
config.network.OpenVPN_autostart = ConfigYesNo(default = False)
config.network.Samba_autostart = ConfigYesNo(default = True)
config.network.Inadyn_autostart = ConfigYesNo(default = False)
config.network.uShare_autostart = ConfigYesNo(default = False)
config.softwareupdate = ConfigSubsection()
config.softwareupdate.autosettingsbackup = ConfigYesNo(default = False)
config.softwareupdate.autoimagebackup = ConfigYesNo(default = False)
config.softwareupdate.check = ConfigYesNo(default = False)
config.softwareupdate.checktimer = ConfigSelectionNumber(min = 1, max = 48, stepwidth = 1, default = 24, wraparound = True)
config.softwareupdate.updatelastcheck = ConfigInteger(default=0)
config.softwareupdate.updatefound = NoSave(ConfigBoolean(default = False))
config.softwareupdate.updatebeta = ConfigYesNo(default = False)
config.softwareupdate.updateisunstable = ConfigYesNo(default = False)
config.timeshift = ConfigSubsection()
choicelist = [("0", "Disabled")]
for i in (2, 3, 4, 5, 10, 20, 30):
choicelist.append(("%d" % i, ngettext("%d second", "%d seconds", i) % i))
for i in (60, 120, 300):
m = i / 60
choicelist.append(("%d" % i, ngettext("%d minute", "%d minutes", m) % m))
config.timeshift.startdelay = ConfigSelection(default = "0", choices = choicelist)
config.timeshift.showinfobar = ConfigYesNo(default = True)
config.timeshift.stopwhilerecording = ConfigYesNo(default = False)
config.timeshift.favoriteSaveAction = ConfigSelection([("askuser", _("Ask user")),("savetimeshift", _("Save and stop")),("savetimeshiftandrecord", _("Save and record")),("noSave", _("Don't save"))], "askuser")
config.timeshift.autorecord = ConfigYesNo(default = False)
config.timeshift.isRecording = NoSave(ConfigYesNo(default = False))
config.timeshift.timeshiftMaxHours = ConfigSelectionNumber(min = 1, max = 999, stepwidth = 1, default = 12, wraparound = True)
config.timeshift.deleteAfterZap = ConfigYesNo(default = True)
config.seek = ConfigSubsection()
config.seek.baractivation = ConfigSelection([("leftright", _("Long Left/Right")),("ffrw", _("Long << / >>"))], "leftright")
config.seek.sensibility = ConfigSelectionNumber(min = 1, max = 10, stepwidth = 1, default = 10, wraparound = True)
config.seek.selfdefined_13 = ConfigSelectionNumber(min = 1, max = 300, stepwidth = 1, default = 15, wraparound = True)
config.seek.selfdefined_46 = ConfigSelectionNumber(min = 1, max = 600, stepwidth = 1, default = 60, wraparound = True)
config.seek.selfdefined_79 = ConfigSelectionNumber(min = 1, max = 1200, stepwidth = 1, default = 300, wraparound = True)
config.seek.speeds_forward = ConfigSet(default=[2, 4, 8, 16, 32, 64, 128], choices=[2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128])
config.seek.speeds_backward = ConfigSet(default=[2, 4, 8, 16, 32, 64, 128], choices=[1, 2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128])
config.seek.speeds_slowmotion = ConfigSet(default=[2, 4, 8], choices=[2, 4, 6, 8, 12, 16, 25])
config.seek.enter_forward = ConfigSelection(default = "2", choices = ["2", "4", "6", "8", "12", "16", "24", "32", "48", "64", "96", "128"])
config.seek.enter_backward = ConfigSelection(default = "1", choices = ["1", "2", "4", "6", "8", "12", "16", "24", "32", "48", "64", "96", "128"])
config.seek.on_pause = ConfigSelection(default = "play", choices = [
("play", _("Play")),
("step", _("Single step (GOP)")),
("last", _("Last speed")) ])
config.crash = ConfigSubsection()
config.crash.details = ConfigYesNo(default = True)
config.crash.enabledebug = ConfigYesNo(default = False)
config.crash.debugloglimit = ConfigSelectionNumber(min = 1, max = 10, stepwidth = 1, default = 4, wraparound = True)
config.crash.daysloglimit = ConfigSelectionNumber(min = 1, max = 30, stepwidth = 1, default = 8, wraparound = True)
config.crash.sizeloglimit = ConfigSelectionNumber(min = 1, max = 20, stepwidth = 1, default = 10, wraparound = True)
debugpath = [('/home/root/logs/', '/home/root/')]
for p in harddiskmanager.getMountedPartitions():
if os.path.exists(p.mountpoint):
d = os.path.normpath(p.mountpoint)
if p.mountpoint != '/':
debugpath.append((p.mountpoint + 'logs/', d))
config.crash.debug_path = ConfigSelection(default = "/home/root/logs/", choices = debugpath)
def updatedebug_path(configElement):
if not os.path.exists(config.crash.debug_path.value):
os.mkdir(config.crash.debug_path.value,0755)
config.crash.debug_path.addNotifier(updatedebug_path, immediate_feedback = False)
config.usage.timerlist_finished_timer_position = ConfigSelection(default = "end", choices = [("beginning", _("at beginning")), ("end", _("at end"))])
def updateEnterForward(configElement):
if not configElement.value:
configElement.value = [2]
updateChoices(config.seek.enter_forward, configElement.value)
config.seek.speeds_forward.addNotifier(updateEnterForward, immediate_feedback = False)
def updateEnterBackward(configElement):
if not configElement.value:
configElement.value = [2]
updateChoices(config.seek.enter_backward, configElement.value)
config.seek.speeds_backward.addNotifier(updateEnterBackward, immediate_feedback = False)
def updateEraseSpeed(el):
eBackgroundFileEraser.getInstance().setEraseSpeed(int(el.value))
def updateEraseFlags(el):
eBackgroundFileEraser.getInstance().setEraseFlags(int(el.value))
config.misc.erase_speed = ConfigSelection(default="20", choices = [
("10", "10 MB/s"),
("20", "20 MB/s"),
("50", "50 MB/s"),
("100", "100 MB/s")])
config.misc.erase_speed.addNotifier(updateEraseSpeed, immediate_feedback = False)
config.misc.erase_flags = ConfigSelection(default="1", choices = [
("0", _("Disable")),
("1", _("Internal hdd only")),
("3", _("Everywhere"))])
config.misc.erase_flags.addNotifier(updateEraseFlags, immediate_feedback = False)
if SystemInfo["ZapMode"]:
def setZapmode(el):
file = open(zapfile, "w")
file.write(el.value)
file.close()
if os.path.exists("/proc/stb/video/zapping_mode"):
zapfile = "/proc/stb/video/zapping_mode"
else:
zapfile = "/proc/stb/video/zapmode"
zapoptions = [("mute", _("Black screen")), ("hold", _("Hold screen")), ("mutetilllock", _("Black screen till locked")), ("holdtilllock", _("Hold till locked"))]
config.misc.zapmode = ConfigSelection(default = "mute", choices = zapoptions )
config.misc.zapmode.addNotifier(setZapmode, immediate_feedback = False)
config.usage.historymode = ConfigSelection(default = "1", choices = [("1", _("Show menu")), ("0", _("Just zap")), ("2", _("Show Zap-History Browser"))])
config.usage.bookmarkmode = ConfigSelection(default = "0", choices = [("1", _("Show EMC")), ("0", _("Show Movielist")), ("2", _("Show Simple Movie List"))])
config.subtitles = ConfigSubsection()
config.subtitles.ttx_subtitle_colors = ConfigSelection(default = "1", choices = [
("0", _("original")),
("1", _("white")),
("2", _("yellow")) ])
config.subtitles.ttx_subtitle_original_position = ConfigYesNo(default = False)
config.subtitles.subtitle_position = ConfigSelection( choices = ["0", "10", "20", "30", "40", "50", "60", "70", "80", "90", "100", "150", "200", "250", "300", "350", "400", "450"], default = "50")
config.subtitles.subtitle_alignment = ConfigSelection(choices = [("left", _("left")), ("center", _("center")), ("right", _("right"))], default = "center")
config.subtitles.subtitle_rewrap = ConfigYesNo(default = False)
config.subtitles.subtitle_borderwidth = ConfigSelection(choices = ["1", "2", "3", "4", "5"], default = "3")
config.subtitles.subtitle_fontsize = ConfigSelection(choices = ["16", "18", "20", "22", "24", "26", "28", "30", "32", "34", "36", "38", "40", "42", "44", "46", "48", "50", "52", "54"], default = "34")
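	# Subtitle delay values are expressed in 90 kHz PTS ticks: each 45000-tick step
	# equals 0.5 seconds, and the list below spans -10 s to +20 s around "No delay".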
subtitle_delay_choicelist = []
for i in range(-900000, 1845000, 45000):
if i == 0:
subtitle_delay_choicelist.append(("0", _("No delay")))
else:
subtitle_delay_choicelist.append(("%d" % i, "%2.1f sec" % (i / 90000.)))
config.subtitles.subtitle_noPTSrecordingdelay = ConfigSelection(default = "315000", choices = subtitle_delay_choicelist)
config.subtitles.dvb_subtitles_yellow = ConfigYesNo(default = False)
config.subtitles.dvb_subtitles_original_position = ConfigSelection(default = "0", choices = [("0", _("Original")), ("1", _("Fixed")), ("2", _("Relative"))])
config.subtitles.dvb_subtitles_centered = ConfigYesNo(default = True)
config.subtitles.subtitle_bad_timing_delay = ConfigSelection(default = "0", choices = subtitle_delay_choicelist)
config.subtitles.dvb_subtitles_backtrans = ConfigSelection(default = "0", choices = [
("0", _("No transparency")),
("25", "10%"),
("50", "20%"),
("75", "30%"),
("100", "40%"),
("125", "50%"),
("150", "60%"),
("175", "70%"),
("200", "80%"),
("225", "90%"),
("255", _("Full transparency"))])
config.subtitles.pango_subtitle_colors = ConfigSelection(default = "0", choices = [
("0", _("alternative")),
("1", _("white")),
("2", _("yellow")) ])
config.subtitles.pango_subtitles_delay = ConfigSelection(default = "0", choices = subtitle_delay_choicelist)
config.subtitles.pango_subtitles_fps = ConfigSelection(default = "1", choices = [
("1", _("Original")),
("23976", _("23.976")),
("24000", _("24")),
("25000", _("25")),
("29970", _("29.97")),
("30000", _("30"))])
config.subtitles.pango_autoturnon = ConfigYesNo(default = True)
config.autolanguage = ConfigSubsection()
audio_language_choices=[
("---", _("None")),
("und", _("Undetermined")),
("orj dos ory org esl qaa und mis mul ORY ORJ Audio_ORJ", _("Original")),
("ara", _("Arabic")),
("eus baq", _("Basque")),
("bul", _("Bulgarian")),
("hrv", _("Croatian")),
("ces cze", _("Czech")),
("dan", _("Danish")),
("dut ndl Dutch", _("Dutch")),
("eng qaa Englisch", _("English")),
("est", _("Estonian")),
("fin", _("Finnish")),
("fra fre", _("French")),
("deu ger", _("German")),
("ell gre", _("Greek")),
("heb", _("Hebrew")),
("hun", _("Hungarian")),
("ita", _("Italian")),
("lav", _("Latvian")),
("lit", _("Lithuanian")),
("ltz", _("Luxembourgish")),
("nor", _("Norwegian")),
("pol", _("Polish")),
("por", _("Portuguese")),
("fas per", _("Persian")),
("ron rum", _("Romanian")),
("rus", _("Russian")),
("srp", _("Serbian")),
("slk slo", _("Slovak")),
("slv", _("Slovenian")),
("spa", _("Spanish")),
("swe", _("Swedish")),
("tha", _("Thai")),
("tur Audio_TUR", _("Turkish"))]
def setEpgLanguage(configElement):
eServiceEvent.setEPGLanguage(configElement.value)
config.autolanguage.audio_epglanguage = ConfigSelection(audio_language_choices[:1] + audio_language_choices [2:], default="---")
config.autolanguage.audio_epglanguage.addNotifier(setEpgLanguage)
def setEpgLanguageAlternative(configElement):
eServiceEvent.setEPGLanguageAlternative(configElement.value)
config.autolanguage.audio_epglanguage_alternative = ConfigSelection(audio_language_choices[:1] + audio_language_choices [2:], default="---")
config.autolanguage.audio_epglanguage_alternative.addNotifier(setEpgLanguageAlternative)
config.autolanguage.audio_autoselect1 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_autoselect2 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_autoselect3 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_autoselect4 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_defaultac3 = ConfigYesNo(default = True)
config.autolanguage.audio_defaultddp = ConfigYesNo(default = False)
config.autolanguage.audio_usecache = ConfigYesNo(default = True)
subtitle_language_choices = audio_language_choices[:1] + audio_language_choices [2:]
config.autolanguage.subtitle_autoselect1 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_autoselect2 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_autoselect3 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_autoselect4 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_hearingimpaired = ConfigYesNo(default = False)
config.autolanguage.subtitle_defaultimpaired = ConfigYesNo(default = False)
config.autolanguage.subtitle_defaultdvb = ConfigYesNo(default = False)
config.autolanguage.subtitle_usecache = ConfigYesNo(default = True)
config.autolanguage.equal_languages = ConfigSelection(default = "15", choices = [
("0", _("None")),("1", "1"),("2", "2"),("3", "1,2"),
("4", "3"),("5", "1,3"),("6", "2,3"),("7", "1,2,3"),
("8", "4"),("9", "1,4"),("10", "2,4"),("11", "1,2,4"),
("12", "3,4"),("13", "1,3,4"),("14", "2,3,4"),("15", _("All"))])
config.logmanager = ConfigSubsection()
config.logmanager.showinextensions = ConfigYesNo(default = False)
config.logmanager.user = ConfigText(default='', fixed_size=False)
config.logmanager.useremail = ConfigText(default='', fixed_size=False)
config.logmanager.usersendcopy = ConfigYesNo(default = True)
config.logmanager.path = ConfigText(default = "/")
config.logmanager.additionalinfo = NoSave(ConfigText(default = ""))
config.logmanager.sentfiles = ConfigLocations(default='')
config.plisettings = ConfigSubsection()
config.plisettings.Subservice = ConfigYesNo(default = True)
config.plisettings.ShowPressedButtons = ConfigYesNo(default = False)
config.plisettings.ColouredButtons = ConfigYesNo(default = True)
config.plisettings.InfoBarEpg_mode = ConfigSelection(default="0", choices = [
("0", _("as plugin in extended bar")),
("1", _("with long OK press")),
("2", _("with exit button")),
("3", _("with left/right buttons"))])
if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/CoolTVGuide/plugin.pyo"):
config.plisettings.PLIEPG_mode = ConfigSelection(default="cooltvguide", choices = [
("pliepg", _("Show Graphical EPG")),
("single", _("Show Single EPG")),
("multi", _("Show Multi EPG")),
("eventview", _("Show Eventview")),
("cooltvguide", _("Show CoolTVGuide")),
("etportal", _("Show EtPortal"))])
config.plisettings.PLIINFO_mode = ConfigSelection(default="coolinfoguide", choices = [
("eventview", _("Show Eventview")),
("epgpress", _("Show EPG")),
("single", _("Show Single EPG")),
("coolsingleguide", _("Show CoolSingleGuide")),
("coolinfoguide", _("Show CoolInfoGuide")),
("cooltvguide", _("Show CoolTVGuide")),
("etportal", _("Show EtPortal"))])
config.plisettings.PLIFAV_mode = ConfigSelection(default="coolinfoguide", choices = [
("eventview", _("Show Eventview")),
("showfavourites", _("Show Favourites")),
("epgpress", _("Show EPG")),
("single", _("Show Single EPG")),
("coolsingleguide", _("Show CoolSingleGuide")),
("coolinfoguide", _("Show CoolInfoGuide")),
("cooltvguide", _("Show CoolTVGuide")),
("emc", _("Show Enhanced Movie Center")),
("mediaportal", _("Show Media Portal")),
("dreamplex", _("Show DreamPlex")),
("etportal", _("Show EtPortal"))])
else:
config.plisettings.PLIEPG_mode = ConfigSelection(default="pliepg", choices = [
("pliepg", _("Show Graphical EPG")),
("single", _("Show Single EPG")),
("multi", _("Show Multi EPG")),
("eventview", _("Show Eventview"))])
config.plisettings.PLIINFO_mode = ConfigSelection(default="eventview", choices = [
("eventview", _("Show Eventview")),
("epgpress", _("Show EPG")),
("single", _("Show Single EPG"))])
config.plisettings.PLIFAV_mode = ConfigSelection(default="eventview", choices = [
("eventview", _("Show Eventview")),
("epgpress", _("Show EPG")),
("single", _("Show Single EPG"))])
config.epgselection = ConfigSubsection()
config.epgselection.sort = ConfigSelection(default="0", choices = [("0", _("Time")),("1", _("Alphanumeric"))])
config.epgselection.overjump = ConfigYesNo(default = False)
config.epgselection.infobar_type_mode = ConfigSelection(choices = [("graphics",_("Multi EPG")), ("single", _("Single EPG"))], default = "graphics")
if SystemInfo.get("NumVideoDecoders", 1) > 1:
config.epgselection.infobar_preview_mode = ConfigSelection(choices = [("0",_("Disabled")), ("1", _("Fullscreen")), ("2", _("PiP"))], default = "1")
else:
config.epgselection.infobar_preview_mode = ConfigSelection(choices = [("0",_("Disabled")), ("1", _("Fullscreen"))], default = "1")
config.epgselection.infobar_ok = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap")
config.epgselection.infobar_oklong = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap + Exit")
config.epgselection.infobar_itemsperpage = ConfigSelectionNumber(default = 2, stepwidth = 1, min = 1, max = 4, wraparound = True)
if SystemInfo.get("NumVideoDecoders", 1) > 1:
if HardwareInfo().is_nextgen():
previewdefault = "2"
else:
previewdefault = "1"
config.epgselection.infobar_preview_mode = ConfigSelection(choices = [("0",_("Disabled")), ("1", _("Fullscreen")), ("2", _("PiP"))], default = previewdefault)
else:
config.epgselection.infobar_preview_mode = ConfigSelection(choices = [("0",_("Disabled")), ("1", _("Fullscreen"))], default = "1")
config.epgselection.infobar_roundto = ConfigSelection(default = "15", choices = [("15", _("%d minutes") % 15), ("30", _("%d minutes") % 30), ("60", _("%d minutes") % 60)])
config.epgselection.infobar_prevtime = ConfigClock(default = time())
config.epgselection.infobar_prevtimeperiod = ConfigSelection(default = "300", choices = [("60", _("%d minutes") % 60), ("90", _("%d minutes") % 90), ("120", _("%d minutes") % 120), ("150", _("%d minutes") % 150), ("180", _("%d minutes") % 180), ("210", _("%d minutes") % 210), ("240", _("%d minutes") % 240), ("270", _("%d minutes") % 270), ("300", _("%d minutes") % 300)])
config.epgselection.infobar_primetimehour = ConfigSelectionNumber(default = 20, stepwidth = 1, min = 00, max = 23, wraparound = True)
config.epgselection.infobar_primetimemins = ConfigSelectionNumber(default = 00, stepwidth = 1, min = 00, max = 59, wraparound = True)
config.epgselection.infobar_servicetitle_mode = ConfigSelection(default = "servicename", choices = [("servicename", _("Service Name")),("picon", _("Picon")),("picon+servicename", _("Picon and Service Name")) ])
config.epgselection.infobar_servfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.infobar_eventfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.infobar_timelinefs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.infobar_timeline24h = ConfigYesNo(default = True)
config.epgselection.infobar_servicewidth = ConfigSelectionNumber(default = 250, stepwidth = 1, min = 70, max = 500, wraparound = True)
config.epgselection.infobar_piconwidth = ConfigSelectionNumber(default = 100, stepwidth = 1, min = 70, max = 500, wraparound = True)
config.epgselection.infobar_infowidth = ConfigSelectionNumber(default = 50, stepwidth = 25, min = 0, max = 150, wraparound = True)
config.epgselection.enhanced_preview_mode = ConfigYesNo(default = True)
config.epgselection.enhanced_ok = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap")
config.epgselection.enhanced_oklong = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap + Exit")
config.epgselection.enhanced_eventfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.enhanced_itemsperpage = ConfigSelectionNumber(default = 18, stepwidth = 1, min = 12, max = 40, wraparound = True)
config.epgselection.multi_showbouquet = ConfigYesNo(default = False)
config.epgselection.multi_preview_mode = ConfigYesNo(default = True)
config.epgselection.multi_ok = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap")
config.epgselection.multi_oklong = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap + Exit")
config.epgselection.multi_eventfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.multi_itemsperpage = ConfigSelectionNumber(default = 18, stepwidth = 1, min = 12, max = 40, wraparound = True)
config.epgselection.graph_showbouquet = ConfigYesNo(default = False)
config.epgselection.graph_preview_mode = ConfigYesNo(default = True)
config.epgselection.graph_type_mode = ConfigSelection(choices = [("graphics",_("Graphics")), ("text", _("Text"))], default = "graphics")
config.epgselection.graph_ok = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap")
config.epgselection.graph_oklong = ConfigSelection(choices = [("Zap",_("Zap")), ("Zap + Exit", _("Zap + Exit"))], default = "Zap + Exit")
config.epgselection.graph_info = ConfigSelection(choices = [("Channel Info", _("Channel Info")), ("Single EPG", _("Single EPG"))], default = "Channel Info")
config.epgselection.graph_infolong = ConfigSelection(choices = [("Channel Info", _("Channel Info")), ("Single EPG", _("Single EPG"))], default = "Single EPG")
config.epgselection.graph_roundto = ConfigSelection(default = "15", choices = [("15", _("%d minutes") % 15), ("30", _("%d minutes") % 30), ("60", _("%d minutes") % 60)])
config.epgselection.graph_prevtime = ConfigClock(default = time())
config.epgselection.graph_prevtimeperiod = ConfigSelection(default = "180", choices = [("60", _("%d minutes") % 60), ("90", _("%d minutes") % 90), ("120", _("%d minutes") % 120), ("150", _("%d minutes") % 150), ("180", _("%d minutes") % 180), ("210", _("%d minutes") % 210), ("240", _("%d minutes") % 240), ("270", _("%d minutes") % 270), ("300", _("%d minutes") % 300)])
config.epgselection.graph_primetimehour = ConfigSelectionNumber(default = 20, stepwidth = 1, min = 00, max = 23, wraparound = True)
config.epgselection.graph_primetimemins = ConfigSelectionNumber(default = 00, stepwidth = 1, min = 00, max = 59, wraparound = True)
config.epgselection.graph_servicetitle_mode = ConfigSelection(default = "picon+servicename", choices = [("servicename", _("Service Name")),("picon", _("Picon")),("picon+servicename", _("Picon and Service Name")) ])
config.epgselection.graph_channel1 = ConfigYesNo(default = False)
config.epgselection.graph_servfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.graph_eventfs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.graph_timelinefs = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 10, wraparound = True)
config.epgselection.graph_timeline24h = ConfigYesNo(default = True)
config.epgselection.graph_itemsperpage = ConfigSelectionNumber(default = 8, stepwidth = 1, min = 3, max = 16, wraparound = True)
config.epgselection.graph_pig = ConfigYesNo(default = True)
config.epgselection.graph_heightswitch = NoSave(ConfigYesNo(default = False))
config.epgselection.graph_servicewidth = ConfigSelectionNumber(default = 250, stepwidth = 1, min = 70, max = 500, wraparound = True)
config.epgselection.graph_piconwidth = ConfigSelectionNumber(default = 100, stepwidth = 1, min = 70, max = 500, wraparound = True)
config.epgselection.graph_infowidth = ConfigSelectionNumber(default = 50, stepwidth = 25, min = 0, max = 150, wraparound = True)
softcams = sorted(filter(lambda x: x.startswith('softcam.'), os.listdir("/etc/init.d/")))
config.oscaminfo = ConfigSubsection()
config.oscaminfo.showInExtensions = ConfigYesNo(default=False)
config.oscaminfo.userdatafromconf = ConfigYesNo(default = False)
config.oscaminfo.autoupdate = ConfigYesNo(default = False)
config.oscaminfo.username = ConfigText(default = "username", fixed_size = False, visible_width=12)
config.oscaminfo.password = ConfigPassword(default = "password", fixed_size = False)
config.oscaminfo.ip = ConfigIP( default = [ 127,0,0,1 ], auto_jump=True)
config.oscaminfo.port = ConfigInteger(default = 16002, limits=(0,65536) )
config.oscaminfo.intervall = ConfigSelectionNumber(min = 1, max = 600, stepwidth = 1, default = 10, wraparound = True)
SystemInfo["OScamInstalled"] = False
config.cccaminfo = ConfigSubsection()
config.cccaminfo.showInExtensions = ConfigYesNo(default=False)
config.cccaminfo.serverNameLength = ConfigSelectionNumber(min = 10, max = 100, stepwidth = 1, default = 22, wraparound = True)
config.cccaminfo.name = ConfigText(default="Profile", fixed_size=False)
config.cccaminfo.ip = ConfigText(default="192.168.2.12", fixed_size=False)
config.cccaminfo.username = ConfigText(default="", fixed_size=False)
config.cccaminfo.password = ConfigText(default="", fixed_size=False)
config.cccaminfo.port = ConfigInteger(default=16001, limits=(1, 65535))
config.cccaminfo.profile = ConfigText(default="", fixed_size=False)
config.cccaminfo.ecmInfoEnabled = ConfigYesNo(default=True)
config.cccaminfo.ecmInfoTime = ConfigSelectionNumber(min = 1, max = 10, stepwidth = 1, default = 5, wraparound = True)
config.cccaminfo.ecmInfoForceHide = ConfigYesNo(default=True)
config.cccaminfo.ecmInfoPositionX = ConfigInteger(default=50)
config.cccaminfo.ecmInfoPositionY = ConfigInteger(default=50)
config.cccaminfo.blacklist = ConfigText(default="/media/cf/CCcamInfo.blacklisted", fixed_size=False)
config.cccaminfo.profiles = ConfigText(default="/media/cf/CCcamInfo.profiles", fixed_size=False)
SystemInfo["CCcamInstalled"] = False
if os.path.islink('/etc/init.d/softcam'):
for softcam in softcams:
if "cccam" in os.readlink('/etc/init.d/softcam').lower():
config.cccaminfo.showInExtensions = ConfigYesNo(default=True)
SystemInfo["CCcamInstalled"] = True
elif "oscam" in os.readlink('/etc/init.d/softcam').lower():
config.oscaminfo.showInExtensions = ConfigYesNo(default=True)
SystemInfo["OScamInstalled"] = True
config.streaming = ConfigSubsection()
config.streaming.stream_ecm = ConfigYesNo(default = False)
config.streaming.descramble = ConfigYesNo(default = True)
config.streaming.stream_eit = ConfigYesNo(default = True)
config.streaming.stream_ait = ConfigYesNo(default = True)
config.pluginbrowser = ConfigSubsection()
config.pluginbrowser.po = ConfigYesNo(default = False)
config.pluginbrowser.src = ConfigYesNo(default = False)
def updateChoices(sel, choices):
if choices:
defval = None
val = int(sel.value)
if not val in choices:
tmp = choices[:]
tmp.reverse()
for x in tmp:
if x < val:
defval = str(x)
break
sel.setChoices(map(str, choices), defval)
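# Illustrative sketch (not part of the original file): updateChoices keeps a
# ConfigSelection usable when its stored value is no longer offered. With a
# hypothetical selection whose current value is "30" and choices = [10, 20, 50],
# "30" is absent, so the reversed scan picks the largest choice below it:
#
#   updateChoices(sel, [10, 20, 50])
#   # sel choices -> ["10", "20", "50"], default -> "20"
#
# If the stored value is still present, defval stays None and only the choice
# list is refreshed.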
def preferredPath(path):
if config.usage.setup_level.index < 2 or path == "<default>":
return None # config.usage.default_path.value, but delay lookup until usage
elif path == "<current>":
return config.movielist.last_videodir.value
elif path == "<timer>":
return config.movielist.last_timer_videodir.value
else:
return path
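# Minimal usage sketch (paths are assumed values, not from the original source):
#
#   preferredPath("<default>")  -> None  (caller falls back to the default path)
#   preferredPath("<current>")  -> config.movielist.last_videodir.value
#   preferredPath("<timer>")    -> config.movielist.last_timer_videodir.value
#   preferredPath("/hdd/movie") -> "/hdd/movie"
#
# A setup level below index 2 also forces None, hiding the extra path options
# from basic setups.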
def preferredTimerPath():
return preferredPath(config.usage.timer_path.value)
def preferredInstantRecordPath():
return preferredPath(config.usage.instantrec_path.value)
def defaultMoviePath():
return defaultRecordingLocation(config.usage.default_path.value)
def refreshServiceList(configElement = None):
from Screens.InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance is not None:
servicelist = InfoBarInstance.servicelist
if servicelist:
servicelist.setMode()
| gpl-2.0 |
Peter92/MouseTrack | mousetracks/utils/sockets.py | 1 | 2511 | """This is part of the Mouse Tracks Python application.
Source: https://github.com/Peter92/MouseTracks
"""
# Easy-to-use wrappers for sockets
from __future__ import absolute_import
import psutil
import socket
import struct
from select import select
from .compatibility import pickle
def send_msg(sock, msg):
"""Prefix each messge with length."""
msg = pickle.dumps(msg)
msg = struct.pack('>I', len(msg)) + msg
sock.sendall(msg)
def recv_msg(sock):
"""Receive the message."""
#Read message length
raw_msglen = recvall(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', raw_msglen)[0]
#Read message data
return pickle.loads(recvall(sock, msglen))
def recvall(sock, n):
"""Receive socket data and detect if the connection was closed."""
data = ''
while len(data) < n:
packet = sock.recv(n - len(data))
if not packet:
return None
data += packet
return data
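# Sketch of the wire format used above (illustrative payload, not from the source):
# send_msg pickles the object and prepends a 4-byte big-endian length header, so
# recv_msg can read exactly 4 bytes, unpack the length, then read the remainder.
#
#   payload = pickle.dumps({'cmd': 'ping'})            # some N bytes
#   frame = struct.pack('>I', len(payload)) + payload  # 4-byte header + payload
#   # receiver side:
#   #   msglen = struct.unpack('>I', frame[:4])[0]
#   #   obj = pickle.loads(frame[4:])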
def msg_empty(sock):
"""Detect if socket is empty."""
return not select([sock], [], [], 0)[0]
def get_ip(sock):
"""Get the IP address the socket is bound to."""
return sock.getsockname()[0]
def get_port(sock):
"""Get the port the socket is bound to."""
return sock.getsockname()[1]
def get_free_port():
"""Find a free port resulting from using port 0."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
port = sock.getsockname()[1]
sock.close()
return port
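# get_free_port relies on the OS handing out an ephemeral port when binding to
# port 0. Minimal (and inherently racy) usage sketch:
#
#   port = get_free_port()
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind(('localhost', port))  # another process could grab the port first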
def force_close_port(port, process_name=None):
"""Terminate a process that is bound to a port.
The process name can be set (e.g. "python"); any process whose name
does not start with it is ignored.
"""
for proc in psutil.process_iter():
for conn in proc.connections():
if conn.laddr[1] == port:
# Don't close the process if it belongs to SYSTEM.
# On Windows, calling .username() on such a process raises AccessDenied.
# TODO: needs testing on other operating systems
try:
proc.username()
except psutil.AccessDenied:
pass
else:
if process_name is None or proc.name().startswith(process_name):
try:
proc.kill()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass | gpl-3.0 |
BackupGGCode/python-for-android | python3-alpha/python3-src/Lib/unittest/test/support.py | 770 | 3379 | import unittest
class TestEquality(object):
"""Used as a mixin for TestCase"""
# Check for a valid __eq__ implementation
def test_eq(self):
for obj_1, obj_2 in self.eq_pairs:
self.assertEqual(obj_1, obj_2)
self.assertEqual(obj_2, obj_1)
# Check for a valid __ne__ implementation
def test_ne(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1)
class TestHashing(object):
"""Used as a mixin for TestCase"""
# Check for a valid __hash__ implementation
def test_hash(self):
for obj_1, obj_2 in self.eq_pairs:
try:
if not hash(obj_1) == hash(obj_2):
self.fail("%r and %r do not hash equal" % (obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception as e:
self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e))
for obj_1, obj_2 in self.ne_pairs:
try:
if hash(obj_1) == hash(obj_2):
self.fail("%s and %s hash equal, but shouldn't" %
(obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception as e:
self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
class LoggingResult(unittest.TestResult):
def __init__(self, log):
self._events = log
super().__init__()
def startTest(self, test):
self._events.append('startTest')
super().startTest(test)
def startTestRun(self):
self._events.append('startTestRun')
super(LoggingResult, self).startTestRun()
def stopTest(self, test):
self._events.append('stopTest')
super().stopTest(test)
def stopTestRun(self):
self._events.append('stopTestRun')
super(LoggingResult, self).stopTestRun()
def addFailure(self, *args):
self._events.append('addFailure')
super().addFailure(*args)
def addSuccess(self, *args):
self._events.append('addSuccess')
super(LoggingResult, self).addSuccess(*args)
def addError(self, *args):
self._events.append('addError')
super().addError(*args)
def addSkip(self, *args):
self._events.append('addSkip')
super(LoggingResult, self).addSkip(*args)
def addExpectedFailure(self, *args):
self._events.append('addExpectedFailure')
super(LoggingResult, self).addExpectedFailure(*args)
def addUnexpectedSuccess(self, *args):
self._events.append('addUnexpectedSuccess')
super(LoggingResult, self).addUnexpectedSuccess(*args)
class ResultWithNoStartTestRunStopTestRun(object):
"""An object honouring TestResult before startTestRun/stopTestRun."""
def __init__(self):
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
def startTest(self, test):
pass
def stopTest(self, test):
pass
def addError(self, test):
pass
def addFailure(self, test):
pass
def addSuccess(self, test):
pass
def wasSuccessful(self):
return True
| apache-2.0 |
namccart/gnuradio | gr-digital/python/digital/qa_burst_shaper.py | 18 | 14818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks, digital
import pmt
import numpy as np
import sys
def make_length_tag(offset, length):
return gr.python_to_tag({'offset' : offset,
'key' : pmt.intern('packet_len'),
'value' : pmt.from_long(length),
'srcid' : pmt.intern('qa_burst_shaper')})
def make_tag(offset, key, value):
return gr.python_to_tag({'offset' : offset,
'key' : pmt.intern(key),
'value' : value,
'srcid' : pmt.intern('qa_burst_shaper')})
def compare_tags(a, b):
return a.offset == b.offset and pmt.equal(a.key, b.key) and \
pmt.equal(a.value, b.value)
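# Illustrative note (not in the original test): make_length_tag(0, 20) builds a
# stream tag equivalent to
#
#   {'offset': 0,
#    'key': pmt.intern('packet_len'),
#    'value': pmt.from_long(20),
#    'srcid': pmt.intern('qa_burst_shaper')}
#
# and compare_tags() deliberately checks only offset, key and value, ignoring
# srcid so that tags re-emitted by the shaper block still compare equal.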
class qa_burst_shaper (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_ff (self):
prepad = 10
postpad = 10
length = 20
data = np.ones(length + 10) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad), window[0:5],
np.ones(length - len(window)), window[5:10],
np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_cc (self):
prepad = 10
postpad = 10
length = 20
data = np.ones(length + 10,
dtype=complex) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5, dtype=complex),
-4.0*np.ones(5, dtype=complex)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad, dtype=complex), window[0:5],
np.ones(length - len(window), dtype=complex),
window[5:10], np.zeros(postpad,
dtype=complex)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_c(data, tags=tags)
shaper = digital.burst_shaper_cc(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_c()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertComplexTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_ff_with_phasing (self):
prepad = 10
postpad = 10
length = 20
data = np.ones(length + 10) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
tags = (make_length_tag(0, length),)
phasing = np.zeros(5)
for i in xrange(5):
phasing[i] = ((-1.0)**i)
expected = np.concatenate((np.zeros(prepad), phasing*window[0:5],
np.ones(length), phasing*window[5:10],
np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad + len(window))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad,
insert_phasing=True)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_cc_with_phasing (self):
prepad = 10
postpad = 10
length = 20
data = np.ones(length + 10,
dtype=complex) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5, dtype=complex),
-4.0*np.ones(5, dtype=complex)))
tags = (make_length_tag(0, length),)
phasing = np.zeros(5, dtype=complex)
for i in xrange(5):
phasing[i] = complex((-1.0)**i)
expected = np.concatenate((np.zeros(prepad, dtype=complex),
phasing*window[0:5],
np.ones(length, dtype=complex),
phasing*window[5:10],
np.zeros(postpad, dtype=complex)))
etag = make_length_tag(0, length + prepad + postpad + len(window))
# flowgraph
source = blocks.vector_source_c(data, tags=tags)
shaper = digital.burst_shaper_cc(window, pre_padding=prepad,
post_padding=postpad,
insert_phasing=True)
sink = blocks.vector_sink_c()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertComplexTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_odd_window (self):
prepad = 10
postpad = 10
length = 20
data = np.ones(length + 10) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -3.0*np.ones(1),
-4.0*np.ones(5)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad), window[0:6],
np.ones(length - len(window) - 1),
window[5:11], np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_short_burst (self):
prepad = 10
postpad = 10
length = 9
data = np.ones(length + 10) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -3.0*np.ones(1),
-4.0*np.ones(5)))
tags = (make_length_tag(0, length),)
expected = np.concatenate((np.zeros(prepad), window[0:4],
np.ones(1), window[5:9],
np.zeros(postpad)))
etag = make_length_tag(0, length + prepad + postpad)
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
self.assertTrue(compare_tags(sink.tags()[0], etag))
def test_consecutive_bursts (self):
prepad = 10
postpad = 10
length1 = 15
length2 = 25
data = np.concatenate((np.ones(length1), -1.0*np.ones(length2),
np.zeros(10))) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
tags = (make_length_tag(0, length1), make_length_tag(length1, length2))
expected = np.concatenate((np.zeros(prepad), window[0:5],
np.ones(length1 - len(window)), window[5:10],
np.zeros(postpad + prepad), -1.0*window[0:5],
-1.0*np.ones(length2 - len(window)),
-1.0*window[5:10], np.zeros(postpad)))
etags = (make_length_tag(0, length1 + prepad + postpad),
make_length_tag(length1 + prepad + postpad,
length2 + prepad + postpad))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
for i in xrange(len(etags)):
self.assertTrue(compare_tags(sink.tags()[i], etags[i]))
def test_tag_gap (self):
prepad = 10
postpad = 10
length = 20
gap_len = 5
data = np.arange(2*length + 10,
dtype=float) # need 10 more to push things through
window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
ewindow = window * np.array([1,-1,1,-1,1,1,-1,1,-1,1],dtype=float)
tags = (make_length_tag(0, length),
make_length_tag(length + gap_len, length))
expected = np.concatenate((np.zeros(prepad), ewindow[0:5],
np.arange(0, length, dtype=float),
ewindow[5:10], np.zeros(postpad),
np.zeros(prepad), ewindow[0:5],
np.arange(length + gap_len,
2*length + gap_len, dtype=float),
ewindow[5:10], np.zeros(postpad)))
burst_len = length + len(window) + prepad + postpad
etags = (make_length_tag(0, burst_len),
make_length_tag(burst_len, burst_len))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad,
insert_phasing=True)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
for i in xrange(len(etags)):
self.assertTrue(compare_tags(sink.tags()[i], etags[i]))
def test_tag_propagation (self):
prepad = 10
postpad = 10
length1 = 15
length2 = 25
gap_len = 5
lentag1_offset = 0
lentag2_offset = length1 + gap_len
tag1_offset = 0 # accompanies first length tag
tag2_offset = length1 + gap_len # accompanies second length tag
tag3_offset = 2 # in ramp-up state
tag4_offset = length1 + 2 # in gap; tag will be dropped
tag5_offset = length1 + gap_len + 7 # in copy state
data = np.concatenate((np.ones(length1), np.zeros(gap_len),
-1.0*np.ones(length2), np.zeros(10)))
window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
tags = (make_length_tag(lentag1_offset, length1),
make_length_tag(lentag2_offset, length2),
make_tag(tag1_offset, 'head', pmt.intern('tag1')),
make_tag(tag2_offset, 'head', pmt.intern('tag2')),
make_tag(tag3_offset, 'body', pmt.intern('tag3')),
make_tag(tag4_offset, 'body', pmt.intern('tag4')),
make_tag(tag5_offset, 'body', pmt.intern('tag5')))
expected = np.concatenate((np.zeros(prepad), window[0:5],
np.ones(length1 - len(window)), window[5:10],
np.zeros(postpad + prepad), -1.0*window[0:5],
-1.0*np.ones(length2 - len(window)),
-1.0*window[5:10], np.zeros(postpad)))
elentag1_offset = 0
elentag2_offset = length1 + prepad + postpad
etag1_offset = 0
etag2_offset = elentag2_offset
etag3_offset = prepad + tag3_offset
etag5_offset = 2*prepad + postpad + tag5_offset - gap_len
etags = (make_length_tag(elentag1_offset, length1 + prepad + postpad),
make_length_tag(elentag2_offset, length2 + prepad + postpad),
make_tag(etag1_offset, 'head', pmt.intern('tag1')),
make_tag(etag2_offset, 'head', pmt.intern('tag2')),
make_tag(etag3_offset, 'body', pmt.intern('tag3')),
make_tag(etag5_offset, 'body', pmt.intern('tag5')))
# flowgraph
source = blocks.vector_source_f(data, tags=tags)
shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
post_padding=postpad)
sink = blocks.vector_sink_f()
self.tb.connect(source, shaper, sink)
self.tb.run ()
# checks
self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
for x, y in zip(sorted(sink.tags(), key=gr.tag_t_offset_compare_key()),
sorted(etags, key=gr.tag_t_offset_compare_key())):
self.assertTrue(compare_tags(x, y))
if __name__ == '__main__':
gr_unittest.run(qa_burst_shaper, "qa_burst_shaper.xml")
| gpl-3.0 |
pombredanne/product-definition-center | pdc/apps/release/lib.py | 3 | 6715 | #
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.core.exceptions import ValidationError
from django.db import transaction
import json
import productmd
from productmd.common import create_release_id
from pdc.apps.common import hacks as common_hacks
from pdc.apps.common import models as common_models
from . import models
def _maybe_log(request, created, obj):
"""
Optionally create an entry in changeset.
"""
if created:
request.changeset.add(obj.__class__.__name__,
obj.pk,
'null',
json.dumps(obj.export()))
def _logged_get_or_create(request, model, **kwargs):
"""
Wrapper around `get_or_create` that also creates an entry in changeset.
"""
obj, created = model.objects.get_or_create(**kwargs)
_maybe_log(request, created, obj)
return obj, created
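# Usage sketch (hypothetical field values, mirroring the calls further below):
#
#   product, created = _logged_get_or_create(
#       request, models.Product, name='Fedora', short='fedora')
#
# behaves like models.Product.objects.get_or_create(...), but when a new row is
# created it also records the object's export in request.changeset.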
def get_or_create_integrated_release(request, orig_release, release):
"""
Given a PDC release and a release retrieved from compose info specified in
a variant, return the release for integrated layered product. The Product,
ProductVersion and BaseProduct instances will also be created if necessary.
"""
integrated_base_product, _ = _logged_get_or_create(
request, models.BaseProduct,
name=orig_release.name,
short=orig_release.short,
version=orig_release.version.split('.')[0],
release_type=orig_release.release_type
)
integrated_product, _ = _logged_get_or_create(
request, models.Product,
name=release.name,
short=release.short.lower()
)
integrated_product_version, _ = _logged_get_or_create(
request, models.ProductVersion,
product=integrated_product,
name=release.name,
short=release.short.lower(),
version=release.version.split('.')[0]
)
try:
integrated_release, _ = _logged_get_or_create(
request, models.Release,
name=release.name,
short=release.short.lower(),
release_type=orig_release.release_type,
version=release.version,
base_product=integrated_base_product,
integrated_with=orig_release,
product_version=integrated_product_version
)
except ValidationError:
release_id = create_release_id(
release.short.lower(),
release.version,
orig_release.release_type.short,
integrated_base_product.short,
integrated_base_product.version,
integrated_base_product.release_type.short,
)
msg = ('Failed to create release {} for integrated layered product.' +
' A conflicting release already exists.' +
' There is likely a version mismatch between the imported' +
' release and its layered integrated product in the composeinfo.')
raise ValidationError(msg.format(release_id))
return integrated_release
@transaction.atomic
def release__import_from_composeinfo(request, composeinfo_json):
"""
Import release including variants and architectures from composeinfo json.
"""
ci = productmd.composeinfo.ComposeInfo()
common_hacks.deserialize_wrapper(ci.deserialize, composeinfo_json)
if ci.release.is_layered:
release_type_obj = models.ReleaseType.objects.get(short=getattr(ci.base_product, "type", "ga"))
base_product_obj, _ = _logged_get_or_create(
request, models.BaseProduct,
name=ci.base_product.name,
short=ci.base_product.short.lower(),
version=ci.base_product.version,
release_type=release_type_obj,
)
else:
base_product_obj = None
product_obj, _ = _logged_get_or_create(
request, models.Product,
name=ci.release.name,
short=ci.release.short.lower()
)
product_version_obj, _ = _logged_get_or_create(
request, models.ProductVersion,
product=product_obj,
name=ci.release.name,
short=ci.release.short.lower(),
version=ci.release.major_version
)
release_type_obj = models.ReleaseType.objects.get(short=getattr(ci.release, "type", "ga"))
release_obj, _ = _logged_get_or_create(
request, models.Release,
name=ci.release.name,
short=ci.release.short.lower(),
version=ci.release.version,
base_product=base_product_obj,
release_type=release_type_obj,
product_version=product_version_obj,
)
# if not created:
# raise RuntimeError("Release already exists: %s" % release_obj)
# We can't log variants immediately after they are created, as their export
# includes architectures. Therefore they are collected in this list and
# logged once import is done. This also nicely abstracts integrated
# variants that may not be present.
add_to_changelog = []
for variant in ci.variants.get_variants(recursive=True):
variant_type = models.VariantType.objects.get(name=variant.type)
release = variant.release
integrated_variant = None
if release.name:
integrated_release = get_or_create_integrated_release(
request,
release_obj,
release
)
integrated_variant, created = models.Variant.objects.get_or_create(
release=integrated_release,
variant_id=variant.id,
variant_uid=variant.uid,
variant_name=variant.name,
variant_type=models.VariantType.objects.get(name='variant')
)
if created:
add_to_changelog.append(integrated_variant)
variant_obj, created = models.Variant.objects.get_or_create(
release=release_obj,
variant_id=variant.id,
variant_uid=variant.uid,
variant_name=variant.name,
variant_type=variant_type,
)
if created:
add_to_changelog.append(variant_obj)
for arch in variant.arches:
arch_obj = common_models.Arch.objects.get(name=arch)
var_arch_obj, _ = models.VariantArch.objects.get_or_create(
arch=arch_obj,
variant=variant_obj
)
if integrated_variant:
models.VariantArch.objects.get_or_create(
arch=arch_obj,
variant=integrated_variant
)
for obj in add_to_changelog:
_maybe_log(request, True, obj)
return release_obj
| mit |
drufat/sympy | sympy/functions/special/tests/test_gamma_functions.py | 33 | 16080 | from sympy import (
Symbol, gamma, I, oo, nan, zoo, factorial, sqrt, Rational, log,
polygamma, EulerGamma, pi, uppergamma, S, expand_func, loggamma, sin,
cos, O, lowergamma, exp, erf, erfc, exp_polar, harmonic, zeta,conjugate)
from sympy.core.function import ArgumentIndexError
from sympy.utilities.randtest import (test_derivative_numerically as td,
random_complex_number as randcplx,
verify_numerically as tn)
from sympy.utilities.pytest import raises
x = Symbol('x')
y = Symbol('y')
n = Symbol('n', integer=True)
w = Symbol('w', real=True)
def test_gamma():
assert gamma(nan) == nan
assert gamma(oo) == oo
assert gamma(-100) == zoo
assert gamma(0) == zoo
assert gamma(1) == 1
assert gamma(2) == 1
assert gamma(3) == 2
assert gamma(102) == factorial(101)
assert gamma(Rational(1, 2)) == sqrt(pi)
assert gamma(Rational(3, 2)) == Rational(1, 2)*sqrt(pi)
assert gamma(Rational(5, 2)) == Rational(3, 4)*sqrt(pi)
assert gamma(Rational(7, 2)) == Rational(15, 8)*sqrt(pi)
assert gamma(Rational(-1, 2)) == -2*sqrt(pi)
assert gamma(Rational(-3, 2)) == Rational(4, 3)*sqrt(pi)
assert gamma(Rational(-5, 2)) == -Rational(8, 15)*sqrt(pi)
assert gamma(Rational(-15, 2)) == Rational(256, 2027025)*sqrt(pi)
assert gamma(Rational(
-11, 8)).expand(func=True) == Rational(64, 33)*gamma(Rational(5, 8))
assert gamma(Rational(
-10, 3)).expand(func=True) == Rational(81, 280)*gamma(Rational(2, 3))
assert gamma(Rational(
14, 3)).expand(func=True) == Rational(880, 81)*gamma(Rational(2, 3))
assert gamma(Rational(
17, 7)).expand(func=True) == Rational(30, 49)*gamma(Rational(3, 7))
assert gamma(Rational(
19, 8)).expand(func=True) == Rational(33, 64)*gamma(Rational(3, 8))
assert gamma(x).diff(x) == gamma(x)*polygamma(0, x)
assert gamma(x - 1).expand(func=True) == gamma(x)/(x - 1)
assert gamma(x + 2).expand(func=True, mul=False) == x*(x + 1)*gamma(x)
assert conjugate(gamma(x)) == gamma(conjugate(x))
assert expand_func(gamma(x + Rational(3, 2))) == \
(x + Rational(1, 2))*gamma(x + Rational(1, 2))
assert expand_func(gamma(x - Rational(1, 2))) == \
gamma(Rational(1, 2) + x)/(x - Rational(1, 2))
# Test a bug:
assert expand_func(gamma(x + Rational(3, 4))) == gamma(x + Rational(3, 4))
assert gamma(3*exp_polar(I*pi)/4).is_nonnegative is False
assert gamma(3*exp_polar(I*pi)/4).is_nonpositive is True
# Issue 8526
k = Symbol('k', integer=True, nonnegative=True)
assert isinstance(gamma(k), gamma)
assert gamma(-k) == zoo
def test_gamma_rewrite():
assert gamma(n).rewrite(factorial) == factorial(n - 1)
def test_gamma_series():
assert gamma(x + 1).series(x, 0, 3) == \
1 - EulerGamma*x + x**2*(EulerGamma**2/2 + pi**2/12) + O(x**3)
assert gamma(x).series(x, -1, 3) == \
-1/(x + 1) + EulerGamma - 1 + (x + 1)*(-1 - pi**2/12 - EulerGamma**2/2 + \
EulerGamma) + (x + 1)**2*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma**3/6 - \
polygamma(2, 1)/6 + EulerGamma*pi**2/12 + EulerGamma) + O((x + 1)**3, (x, -1))
def tn_branch(s, func):
from sympy import I, pi, exp_polar
from random import uniform
c = uniform(1, 5)
expr = func(s, c*exp_polar(I*pi)) - func(s, c*exp_polar(-I*pi))
eps = 1e-15
expr2 = func(s + eps, -c + eps*I) - func(s + eps, -c - eps*I)
return abs(expr.n() - expr2.n()).n() < 1e-10
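# tn_branch numerically verifies branch-cut behaviour: the jump of func(s, .)
# across the negative real axis at -c, taken symbolically via exp_polar(+/-I*pi),
# must agree (to ~1e-10) with the jump obtained numerically just above and just
# below the axis (the +/- eps*I evaluations).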
def test_lowergamma():
from sympy import meijerg, exp_polar, I, expint
assert lowergamma(x, y).diff(y) == y**(x - 1)*exp(-y)
assert td(lowergamma(randcplx(), y), y)
assert td(lowergamma(x, randcplx()), x)
assert lowergamma(x, y).diff(x) == \
gamma(x)*polygamma(0, x) - uppergamma(x, y)*log(y) \
- meijerg([], [1, 1], [0, 0, x], [], y)
assert lowergamma(S.Half, x) == sqrt(pi)*erf(sqrt(x))
assert not lowergamma(S.Half - 3, x).has(lowergamma)
assert not lowergamma(S.Half + 3, x).has(lowergamma)
assert lowergamma(S.Half, x, evaluate=False).has(lowergamma)
assert tn(lowergamma(S.Half + 3, x, evaluate=False),
lowergamma(S.Half + 3, x), x)
assert tn(lowergamma(S.Half - 3, x, evaluate=False),
lowergamma(S.Half - 3, x), x)
assert tn_branch(-3, lowergamma)
assert tn_branch(-4, lowergamma)
assert tn_branch(S(1)/3, lowergamma)
assert tn_branch(pi, lowergamma)
assert lowergamma(3, exp_polar(4*pi*I)*x) == lowergamma(3, x)
assert lowergamma(y, exp_polar(5*pi*I)*x) == \
exp(4*I*pi*y)*lowergamma(y, x*exp_polar(pi*I))
assert lowergamma(-2, exp_polar(5*pi*I)*x) == \
lowergamma(-2, x*exp_polar(I*pi)) + 2*pi*I
assert conjugate(lowergamma(x, y)) == lowergamma(conjugate(x), conjugate(y))
assert conjugate(lowergamma(x, 0)) == conjugate(lowergamma(x, 0))
assert conjugate(lowergamma(x, -oo)) == conjugate(lowergamma(x, -oo))
assert lowergamma(
x, y).rewrite(expint) == -y**x*expint(-x + 1, y) + gamma(x)
k = Symbol('k', integer=True)
assert lowergamma(
k, y).rewrite(expint) == -y**k*expint(-k + 1, y) + gamma(k)
k = Symbol('k', integer=True, positive=False)
assert lowergamma(k, y).rewrite(expint) == lowergamma(k, y)
assert lowergamma(x, y).rewrite(uppergamma) == gamma(x) - uppergamma(x, y)
def test_uppergamma():
from sympy import meijerg, exp_polar, I, expint
assert uppergamma(4, 0) == 6
assert uppergamma(x, y).diff(y) == -y**(x - 1)*exp(-y)
assert td(uppergamma(randcplx(), y), y)
assert uppergamma(x, y).diff(x) == \
uppergamma(x, y)*log(y) + meijerg([], [1, 1], [0, 0, x], [], y)
assert td(uppergamma(x, randcplx()), x)
assert uppergamma(S.Half, x) == sqrt(pi)*erfc(sqrt(x))
assert not uppergamma(S.Half - 3, x).has(uppergamma)
assert not uppergamma(S.Half + 3, x).has(uppergamma)
assert uppergamma(S.Half, x, evaluate=False).has(uppergamma)
assert tn(uppergamma(S.Half + 3, x, evaluate=False),
uppergamma(S.Half + 3, x), x)
assert tn(uppergamma(S.Half - 3, x, evaluate=False),
uppergamma(S.Half - 3, x), x)
assert tn_branch(-3, uppergamma)
assert tn_branch(-4, uppergamma)
assert tn_branch(S(1)/3, uppergamma)
assert tn_branch(pi, uppergamma)
assert uppergamma(3, exp_polar(4*pi*I)*x) == uppergamma(3, x)
assert uppergamma(y, exp_polar(5*pi*I)*x) == \
exp(4*I*pi*y)*uppergamma(y, x*exp_polar(pi*I)) + \
gamma(y)*(1 - exp(4*pi*I*y))
assert uppergamma(-2, exp_polar(5*pi*I)*x) == \
uppergamma(-2, x*exp_polar(I*pi)) - 2*pi*I
assert uppergamma(-2, x) == expint(3, x)/x**2
assert conjugate(uppergamma(x, y)) == uppergamma(conjugate(x), conjugate(y))
assert conjugate(uppergamma(x, 0)) == gamma(conjugate(x))
assert conjugate(uppergamma(x, -oo)) == conjugate(uppergamma(x, -oo))
assert uppergamma(x, y).rewrite(expint) == y**x*expint(-x + 1, y)
assert uppergamma(x, y).rewrite(lowergamma) == gamma(x) - lowergamma(x, y)
def test_polygamma():
from sympy import I
assert polygamma(n, nan) == nan
assert polygamma(0, oo) == oo
assert polygamma(0, -oo) == oo
assert polygamma(0, I*oo) == oo
assert polygamma(0, -I*oo) == oo
assert polygamma(1, oo) == 0
assert polygamma(5, oo) == 0
assert polygamma(0, -9) == zoo
assert polygamma(0, -9) == zoo
assert polygamma(0, -1) == zoo
assert polygamma(0, 0) == zoo
assert polygamma(0, 1) == -EulerGamma
assert polygamma(0, 7) == Rational(49, 20) - EulerGamma
assert polygamma(1, 1) == pi**2/6
assert polygamma(1, 2) == pi**2/6 - 1
assert polygamma(1, 3) == pi**2/6 - Rational(5, 4)
assert polygamma(3, 1) == pi**4 / 15
assert polygamma(3, 5) == 6*(Rational(-22369, 20736) + pi**4/90)
assert polygamma(5, 1) == 8 * pi**6 / 63
def t(m, n):
x = S(m)/n
r = polygamma(0, x)
if r.has(polygamma):
return False
return abs(polygamma(0, x.n()).n() - r.n()).n() < 1e-10
assert t(1, 2)
assert t(3, 2)
assert t(-1, 2)
assert t(1, 4)
assert t(-3, 4)
assert t(1, 3)
assert t(4, 3)
assert t(3, 4)
assert t(2, 3)
assert polygamma(0, x).rewrite(zeta) == polygamma(0, x)
assert polygamma(1, x).rewrite(zeta) == zeta(2, x)
assert polygamma(2, x).rewrite(zeta) == -2*zeta(3, x)
assert polygamma(3, 7*x).diff(x) == 7*polygamma(4, 7*x)
assert polygamma(0, x).rewrite(harmonic) == harmonic(x - 1) - EulerGamma
assert polygamma(2, x).rewrite(harmonic) == 2*harmonic(x - 1, 3) - 2*zeta(3)
ni = Symbol("n", integer=True)
assert polygamma(ni, x).rewrite(harmonic) == (-1)**(ni + 1)*(-harmonic(x - 1, ni + 1)
+ zeta(ni + 1))*factorial(ni)
# Polygamma of non-negative integer order is unbranched:
from sympy import exp_polar
k = Symbol('n', integer=True, nonnegative=True)
assert polygamma(k, exp_polar(2*I*pi)*x) == polygamma(k, x)
# but negative integers are branched!
k = Symbol('n', integer=True)
assert polygamma(k, exp_polar(2*I*pi)*x).args == (k, exp_polar(2*I*pi)*x)
# Polygamma of order -1 is loggamma:
assert polygamma(-1, x) == loggamma(x)
# But smaller orders are iterated integrals and don't have a special name
assert polygamma(-2, x).func is polygamma
# Test a bug
assert polygamma(0, -x).expand(func=True) == polygamma(0, -x)
def test_polygamma_expand_func():
assert polygamma(0, x).expand(func=True) == polygamma(0, x)
assert polygamma(0, 2*x).expand(func=True) == \
polygamma(0, x)/2 + polygamma(0, Rational(1, 2) + x)/2 + log(2)
assert polygamma(1, 2*x).expand(func=True) == \
polygamma(1, x)/4 + polygamma(1, Rational(1, 2) + x)/4
assert polygamma(2, x).expand(func=True) == \
polygamma(2, x)
assert polygamma(0, -1 + x).expand(func=True) == \
polygamma(0, x) - 1/(x - 1)
assert polygamma(0, 1 + x).expand(func=True) == \
1/x + polygamma(0, x )
assert polygamma(0, 2 + x).expand(func=True) == \
1/x + 1/(1 + x) + polygamma(0, x)
assert polygamma(0, 3 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x)
assert polygamma(0, 4 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x) + 1/(3 + x)
assert polygamma(1, 1 + x).expand(func=True) == \
polygamma(1, x) - 1/x**2
assert polygamma(1, 2 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2
assert polygamma(1, 3 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - 1/(2 + x)**2
assert polygamma(1, 4 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - \
1/(2 + x)**2 - 1/(3 + x)**2
assert polygamma(0, x + y).expand(func=True) == \
polygamma(0, x + y)
assert polygamma(1, x + y).expand(func=True) == \
polygamma(1, x + y)
assert polygamma(1, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(1, y + 4*x) - 1/(y + 4*x)**2 - \
1/(1 + y + 4*x)**2 - 1/(2 + y + 4*x)**2
assert polygamma(3, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4 - \
6/(1 + y + 4*x)**4 - 6/(2 + y + 4*x)**4
assert polygamma(3, 4*x + y + 1).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4
e = polygamma(3, 4*x + y + S(3)/2)
assert e.expand(func=True) == e
e = polygamma(3, x + y + S(3)/4)
assert e.expand(func=True, basic=False) == e
def test_loggamma():
raises(TypeError, lambda: loggamma(2, 3))
raises(ArgumentIndexError, lambda: loggamma(x).fdiff(2))
assert loggamma(-1) == oo
assert loggamma(-2) == oo
assert loggamma(0) == oo
assert loggamma(1) == 0
assert loggamma(2) == 0
assert loggamma(3) == log(2)
assert loggamma(4) == log(6)
n = Symbol("n", integer=True, positive=True)
assert loggamma(n) == log(gamma(n))
assert loggamma(-n) == oo
assert loggamma(n/2) == log(2**(-n + 1)*sqrt(pi)*gamma(n)/gamma(n/2 + S.Half))
from sympy import I
assert loggamma(oo) == oo
assert loggamma(-oo) == zoo
assert loggamma(I*oo) == zoo
assert loggamma(-I*oo) == zoo
assert loggamma(zoo) == zoo
assert loggamma(nan) == nan
L = loggamma(S(16)/3)
E = -5*log(3) + loggamma(S(1)/3) + log(4) + log(7) + log(10) + log(13)
assert expand_func(L).doit() == E
assert L.n() == E.n()
L = loggamma(19/S(4))
E = -4*log(4) + loggamma(S(3)/4) + log(3) + log(7) + log(11) + log(15)
assert expand_func(L).doit() == E
assert L.n() == E.n()
L = loggamma(S(23)/7)
E = -3*log(7) + log(2) + loggamma(S(2)/7) + log(9) + log(16)
assert expand_func(L).doit() == E
assert L.n() == E.n()
L = loggamma(19/S(4)-7)
E = -log(9) - log(5) + loggamma(S(3)/4) + 3*log(4) - 3*I*pi
assert expand_func(L).doit() == E
assert L.n() == E.n()
L = loggamma(23/S(7)-6)
E = -log(19) - log(12) - log(5) + loggamma(S(2)/7) + 3*log(7) - 3*I*pi
assert expand_func(L).doit() == E
assert L.n() == E.n()
assert loggamma(x).diff(x) == polygamma(0, x)
s1 = loggamma(1/(x + sin(x)) + cos(x)).nseries(x, n=4)
s2 = (-log(2*x) - 1)/(2*x) - log(x/pi)/2 + (4 - log(2*x))*x/24 + O(x**2) + \
log(x)*x**2/2
assert (s1 - s2).expand(force=True).removeO() == 0
s1 = loggamma(1/x).series(x)
s2 = (1/x - S(1)/2)*log(1/x) - 1/x + log(2*pi)/2 + \
x/12 - x**3/360 + x**5/1260 + O(x**7)
assert ((s1 - s2).expand(force=True)).removeO() == 0
assert loggamma(x).rewrite('intractable') == log(gamma(x))
s1 = loggamma(x).series(x)
assert s1 == -log(x) - EulerGamma*x + pi**2*x**2/12 + x**3*polygamma(2, 1)/6 + \
pi**4*x**4/360 + x**5*polygamma(4, 1)/120 + O(x**6)
assert s1 == loggamma(x).rewrite('intractable').series(x)
assert conjugate(loggamma(x)) == loggamma(conjugate(x))
assert conjugate(loggamma(0)) == conjugate(loggamma(0))
assert conjugate(loggamma(1)) == loggamma(conjugate(1))
assert conjugate(loggamma(-oo)) == conjugate(loggamma(-oo))
assert loggamma(x).is_real is None
y, z = Symbol('y', real=True), Symbol('z', imaginary=True)
assert loggamma(y).is_real
assert loggamma(z).is_real is False
def tN(N, M):
assert loggamma(1/x)._eval_nseries(x, n=N).getn() == M
tN(0, 0)
tN(1, 1)
tN(2, 3)
tN(3, 3)
tN(4, 5)
tN(5, 5)
def test_polygamma_expansion():
# A. & S., pp. 259 and 260
assert polygamma(0, 1/x).nseries(x, n=3) == \
-log(x) - x/2 - x**2/12 + O(x**4)
assert polygamma(1, 1/x).series(x, n=5) == \
x + x**2/2 + x**3/6 + O(x**5)
assert polygamma(3, 1/x).nseries(x, n=11) == \
2*x**3 + 3*x**4 + 2*x**5 - x**7 + 4*x**9/3 + O(x**11)
def test_issue_8657():
n = Symbol('n', negative=True, integer=True)
m = Symbol('m', integer=True)
o = Symbol('o', positive=True)
p = Symbol('p', negative=True, integer=False)
assert gamma(n).is_real is None
assert gamma(m).is_real is None
assert gamma(o).is_real is True
assert gamma(p).is_real is True
assert gamma(w).is_real is None
def test_issue_8524():
x = Symbol('x', positive=True)
y = Symbol('y', negative=True)
z = Symbol('z', positive=False)
p = Symbol('p', negative=False)
q = Symbol('q', integer=True)
r = Symbol('r', integer=False)
e = Symbol('e', even=True, negative=True)
assert gamma(x).is_positive is True
assert gamma(y).is_positive is None
assert gamma(z).is_positive is None
assert gamma(p).is_positive is None
assert gamma(q).is_positive is None
assert gamma(r).is_positive is None
assert gamma(e + S.Half).is_positive is True
assert gamma(e - S.Half).is_positive is False
| bsd-3-clause |
ajjl/ITK | Modules/ThirdParty/pygccxml/src/pygccxml/declarations/typedef.py | 12 | 1454 | # Copyright 2014 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
defines class that describes C++ typedef declaration
"""
from . import declaration
from . import dependencies
class typedef_t(declaration.declaration_t):
"""describes C++ typedef declaration"""
def __init__(self, name='', type=None):
"""creates class that describes C++ typedef"""
declaration.declaration_t.__init__(self, name)
self._type = type
def _get__cmp__items(self):
"""implementation details"""
return [self.type]
def __eq__(self, other):
if not declaration.declaration_t.__eq__(self, other):
return False
return self.type == other.type
def __hash__(self):
return super(typedef_t, self).__hash__()
@property
def type(self):
"""reference to the original :class:`type <type_t>`"""
return self._type
@type.setter
def type(self, type):
self._type = type
def i_depend_on_them(self, recursive=True):
return [dependencies.dependency_info_t(self, self.type)]
@property
def byte_size(self):
"Size of this type in bytes @type: int"
return self._type.byte_size
@property
def byte_align(self):
"alignment of this type in bytes @type: int"
return self._type.byte_align
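# Illustrative sketch (hypothetical aliased type, not part of pygccxml):
#
#   td = typedef_t(name='size_type', type=some_unsigned_int_type)
#   td.type               # -> some_unsigned_int_type
#   td.byte_size          # -> delegated to the aliased type's byte_size
#   td.i_depend_on_them() # -> [dependency_info_t(td, some_unsigned_int_type)]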
| apache-2.0 |
december-soul/beets-plugin-importplaycount | importplaycount.py | 1 | 3144 | # coding=utf-8
# Copyright 2014, Rafael Bodill http://github.com/rafi
# vim: set ts=8 sw=4 tw=80 et :
import logging
import requests
import json
from beets.plugins import BeetsPlugin
from beets import ui
from beets import dbcore
from beets import config
from pprint import pprint
from beets.dbcore import types
log = logging.getLogger('beets')
api_url = 'http://ws.audioscrobbler.com/2.0/?method=track.getInfo&mbid=%s&api_key=%s&format=json'
api_url2 = 'http://ws.audioscrobbler.com/2.0/?method=track.getInfo&artist=%s&track=%s&api_key=%s&format=json'
class LastImportPlugin(BeetsPlugin):
def __init__(self):
super(LastImportPlugin, self).__init__()
config['lastfm'].add({
'user': '',
'api_key': '',
})
self.item_types = {
'lastfm_playcount': types.INTEGER,
'lastfm_listeners': types.INTEGER,
}
def commands(self):
cmd = ui.Subcommand('importplaycount',
help='import global last.fm play-count')
def func(lib, opts, args):
import_lastfm(self,lib,args)
cmd.func = func
return [cmd]
def import_lastfm(self, lib, args):
api_key = config['lastfm']['api_key']
if not api_key:
raise ui.UserError('You must specify an api_key for importplaycount')
log.info('Fetching last.fm play count')
for album in lib.albums():
for track in album.items():
count = int(track.get('lastfm_playcount', 0))
listeners = int(track.get('lastfm_listeners', 0))
if count == 0:
try:
page = fetch_track(track.mb_trackid, api_key)
if "track" not in page:
log.error(u'not found by MBID, trying search by name')
page = fetch_track2(track.artist, track.title, api_key)
if "track" in page:
if "playcount" in page["track"]:
try:
new_count = int(page["track"]["playcount"].strip())
except ValueError:
new_count = 0
log.info(u'error converting playcount {0}'.format(page["track"]["playcount"]))
try:
new_listeners = int(page["track"]["listeners"].strip())
except ValueError:
new_listeners = 0
log.info(u'error converting listeners {0}'.format(page["track"]["listeners"]))
log.info(u'{0.artist} - {0.title}: \r\t\t\t\t\t\t\t\t change playcount from {1} to {2} \r\t\t\t\t\t\t\t\t\t\t\t\t\t\t listeners from {3} to {4}'.format(track, count, new_count, listeners, new_listeners))
track['lastfm_playcount'] = new_count
track['lastfm_listeners'] = new_listeners
track.store()
else:
log.error(u'skip {0.artist} - {0.title} Track not found on lastfm, error'.format(track))
pprint(page)
else:
log.error(u'skip {0.artist} - {0.title} Track not found on lastfm'.format(track))
except ValueError:
log.error(u'error {0.artist} - {0.title} Track not found on lastfm'.format(track))
log.info('importplaycount: ... done!')
def fetch_track(mbid, api_key):
return requests.get(api_url % (mbid, api_key)).json()
def fetch_track2(artist, title, api_key):
return requests.get(api_url2 % (artist, title, api_key)).json()
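# Shape of the last.fm track.getInfo response this plugin relies on (abridged,
# values are illustrative only):
#
#   {"track": {"playcount": "12345", "listeners": "678", ...}}
#
# import_lastfm() strips and int()-converts both fields, and falls back to the
# artist/title lookup (fetch_track2) when the MBID query returns no "track" key.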
| gpl-2.0 |
nirmeshk/oh-mainline | vendor/packages/Jinja2/jinja2/testsuite/security.py | 415 | 6204 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.security
~~~~~~~~~~~~~~~~~~~~~~~~~
Checks the sandbox and other security features.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment
from jinja2.sandbox import SandboxedEnvironment, \
ImmutableSandboxedEnvironment, unsafe
from jinja2 import Markup, escape
from jinja2.exceptions import SecurityError, TemplateSyntaxError, \
TemplateRuntimeError
from jinja2._compat import text_type
class PrivateStuff(object):
def bar(self):
return 23
@unsafe
def foo(self):
return 42
def __repr__(self):
return 'PrivateStuff'
class PublicStuff(object):
bar = lambda self: 23
_foo = lambda self: 42
def __repr__(self):
return 'PublicStuff'
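# Context for the fixtures above (informal, inferred from the assertions below):
# SandboxedEnvironment lets templates call ordinary public methods such as
# PublicStuff.bar(), but blocks underscore-prefixed attributes (_foo) and
# anything decorated with @unsafe (PrivateStuff.foo), raising SecurityError.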
class SandboxTestCase(JinjaTestCase):
def test_unsafe(self):
env = SandboxedEnvironment()
self.assert_raises(SecurityError, env.from_string("{{ foo.foo() }}").render,
foo=PrivateStuff())
self.assert_equal(env.from_string("{{ foo.bar() }}").render(foo=PrivateStuff()), '23')
self.assert_raises(SecurityError, env.from_string("{{ foo._foo() }}").render,
foo=PublicStuff())
self.assert_equal(env.from_string("{{ foo.bar() }}").render(foo=PublicStuff()), '23')
self.assert_equal(env.from_string("{{ foo.__class__ }}").render(foo=42), '')
self.assert_equal(env.from_string("{{ foo.func_code }}").render(foo=lambda:None), '')
# security error comes from __class__ already.
self.assert_raises(SecurityError, env.from_string(
"{{ foo.__class__.__subclasses__() }}").render, foo=42)
def test_immutable_environment(self):
env = ImmutableSandboxedEnvironment()
self.assert_raises(SecurityError, env.from_string(
'{{ [].append(23) }}').render)
self.assert_raises(SecurityError, env.from_string(
'{{ {1:2}.clear() }}').render)
def test_restricted(self):
env = SandboxedEnvironment()
self.assert_raises(TemplateSyntaxError, env.from_string,
"{% for item.attribute in seq %}...{% endfor %}")
self.assert_raises(TemplateSyntaxError, env.from_string,
"{% for foo, bar.baz in seq %}...{% endfor %}")
def test_markup_operations(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
# string interpolations are safe to use too
assert Markup('<em>%s</em>') % '<bad user>' == \
               '<em>&lt;bad user&gt;</em>'
assert Markup('<em>%(username)s</em>') % {
'username': '<bad user>'
        } == '<em>&lt;bad user&gt;</em>'
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
# escaping and unescaping
assert escape('"<>&\'') == '"<>&''
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
assert Markup("<test>").unescape() == "<test>"
def test_template_data(self):
env = Environment(autoescape=True)
t = env.from_string('{% macro say_hello(name) %}'
'<p>Hello {{ name }}!</p>{% endmacro %}'
'{{ say_hello("<blink>foo</blink>") }}')
        escaped_out = '<p>Hello &lt;blink&gt;foo&lt;/blink&gt;!</p>'
assert t.render() == escaped_out
assert text_type(t.module) == escaped_out
assert escape(t.module) == escaped_out
assert t.module.say_hello('<blink>foo</blink>') == escaped_out
assert escape(t.module.say_hello('<blink>foo</blink>')) == escaped_out
def test_attr_filter(self):
env = SandboxedEnvironment()
tmpl = env.from_string('{{ cls|attr("__subclasses__")() }}')
self.assert_raises(SecurityError, tmpl.render, cls=int)
def test_binary_operator_intercepting(self):
def disable_op(left, right):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('1 + 2', {}, '3'), ('a + 2', {'a': 2}, '4'):
env = SandboxedEnvironment()
env.binop_table['+'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_binops = frozenset(['+'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
self.fail('expected runtime error')
def test_unary_operator_intercepting(self):
def disable_op(arg):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('-1', {}, '-1'), ('-a', {'a': 2}, '-2'):
env = SandboxedEnvironment()
env.unop_table['-'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_unops = frozenset(['-'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
self.fail('expected runtime error')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SandboxTestCase))
return suite
| agpl-3.0 |
kindy61/mako | test/test_filters.py | 1 | 7765 | # -*- coding: utf-8 -*-
from mako.template import Template
import unittest
from util import result_lines, flatten_result
class FilterTest(unittest.TestCase):
def test_basic(self):
t = Template("""
${x | myfilter}
""")
assert flatten_result(t.render(x="this is x", myfilter=lambda t: "MYFILTER->%s<-MYFILTER" % t)) == "MYFILTER->this is x<-MYFILTER"
def test_expr(self):
"""test filters that are themselves expressions"""
t = Template("""
${x | myfilter(y)}
""")
def myfilter(y):
return lambda x: "MYFILTER->%s<-%s" % (x, y)
assert flatten_result(t.render(x="this is x", myfilter=myfilter, y="this is y")) == "MYFILTER->this is x<-this is y"
def test_convert_str(self):
"""test that string conversion happens in expressions before sending to filters"""
t = Template("""
${x | trim}
""")
assert flatten_result(t.render(x=5)) == "5"
def test_def(self):
t = Template("""
<%def name="foo()" filter="myfilter">
this is foo
</%def>
${foo()}
""")
assert flatten_result(t.render(x="this is x", myfilter=lambda t: "MYFILTER->%s<-MYFILTER" % t)) == "MYFILTER-> this is foo <-MYFILTER"
def test_import(self):
t = Template("""
<%!
from mako import filters
%>\
trim this string: ${" some string to trim " | filters.trim} continue\
""")
assert t.render().strip()=="trim this string: some string to trim continue"
def test_import_2(self):
t = Template("""
trim this string: ${" some string to trim " | filters.trim} continue\
""", imports=["from mako import filters"])
#print t.code
assert t.render().strip()=="trim this string: some string to trim continue"
def test_encode_filter(self):
t = Template("""# coding: utf-8
some stuff.... ${x}
""", default_filters=['decode.utf8'])
#print t.code
assert t.render_unicode(x="voix m’a réveillé").strip() == u"some stuff.... voix m’a réveillé"
def test_custom_default(self):
t = Template("""
<%!
def myfilter(x):
return "->" + x + "<-"
%>
hi ${'there'}
""", default_filters=['myfilter'])
assert t.render().strip()=="hi ->there<-"
def test_global(self):
t = Template("""
<%page expression_filter="h"/>
${"<tag>this is html</tag>"}
""")
assert t.render().strip() == "<tag>this is html</tag>"
def test_nflag(self):
t = Template("""
${"<tag>this is html</tag>" | n}
""", default_filters=['h', 'unicode'])
assert t.render().strip() == "<tag>this is html</tag>"
t = Template("""
<%page expression_filter="h"/>
${"<tag>this is html</tag>" | n}
""")
assert t.render().strip() == "<tag>this is html</tag>"
t = Template("""
<%page expression_filter="h"/>
${"<tag>this is html</tag>" | n, h}
""")
assert t.render().strip() == "<tag>this is html</tag>"
def testnonexpression(self):
t = Template("""
<%!
def a(text):
return "this is a"
def b(text):
return "this is b"
%>
${foo()}
<%def name="foo()" buffered="True">
this is text
</%def>
""", buffer_filters=['a'])
assert t.render().strip() == "this is a"
t = Template("""
<%!
def a(text):
return "this is a"
def b(text):
return "this is b"
%>
${'hi'}
${foo()}
<%def name="foo()" buffered="True">
this is text
</%def>
""", buffer_filters=['a'], default_filters=['b'])
assert flatten_result(t.render()) == "this is b this is b"
t = Template("""
<%!
class Foo(object):
foo = True
def __str__(self):
return "this is a"
def a(text):
return Foo()
def b(text):
if hasattr(text, 'foo'):
return str(text)
else:
return "this is b"
%>
${'hi'}
${foo()}
<%def name="foo()" buffered="True">
this is text
</%def>
""", buffer_filters=['a'], default_filters=['b'])
assert flatten_result(t.render()) == "this is b this is a"
t = Template("""
<%!
def a(text):
return "this is a"
def b(text):
return "this is b"
%>
${foo()}
${bar()}
<%def name="foo()" filter="b">
this is text
</%def>
<%def name="bar()" filter="b" buffered="True">
this is text
</%def>
""", buffer_filters=['a'])
assert flatten_result(t.render()) == "this is b this is a"
def test_builtins(self):
t = Template("""
${"this is <text>" | h}
""")
assert flatten_result(t.render()) == "this is <text>"
t = Template("""
http://foo.com/arg1=${"hi! this is a string." | u}
""")
assert flatten_result(t.render()) == "http://foo.com/arg1=hi%21+this+is+a+string."
class BufferTest(unittest.TestCase):
def test_buffered_def(self):
t = Template("""
<%def name="foo()" buffered="True">
this is foo
</%def>
${"hi->" + foo() + "<-hi"}
""")
assert flatten_result(t.render()) == "hi-> this is foo <-hi"
def test_unbuffered_def(self):
t = Template("""
<%def name="foo()" buffered="False">
this is foo
</%def>
${"hi->" + foo() + "<-hi"}
""")
assert flatten_result(t.render()) == "this is foo hi-><-hi"
def test_capture(self):
t = Template("""
<%def name="foo()" buffered="False">
this is foo
</%def>
${"hi->" + capture(foo) + "<-hi"}
""")
assert flatten_result(t.render()) == "hi-> this is foo <-hi"
def test_capture_exception(self):
template = Template("""
<%def name="a()">
this is a
<%
raise TypeError("hi")
%>
</%def>
<%
c = capture(a)
%>
a->${c}<-a
""")
try:
template.render()
assert False
except TypeError:
assert True
def test_buffered_exception(self):
template = Template("""
<%def name="a()" buffered="True">
<%
raise TypeError("hi")
%>
</%def>
${a()}
""")
try:
print template.render()
assert False
except TypeError:
assert True
def test_capture_ccall(self):
t = Template("""
<%def name="foo()">
<%
x = capture(caller.body)
%>
this is foo. body: ${x}
</%def>
<%call expr="foo()">
ccall body
</%call>
""")
#print t.render()
assert flatten_result(t.render()) == "this is foo. body: ccall body"
| mit |
minorua/QGIS | python/plugins/processing/core/ProcessingResults.py | 15 | 1581 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingResults.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
from qgis.PyQt.QtCore import QObject, pyqtSignal
class ProcessingResults(QObject):
resultAdded = pyqtSignal()
results = []
def addResult(self, icon, name, timestamp, result):
self.results.append(Result(icon, name, timestamp, result))
self.resultAdded.emit()
def getResults(self):
return self.results
class Result:
def __init__(self, icon, name, timestamp, filename):
self.icon = icon
self.name = name
self.timestamp = timestamp
self.filename = filename
resultsList = ProcessingResults()
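# Illustrative usage sketch (hypothetical values, not part of the original module):
# callers share the module-level resultsList instance, e.g.
#
#     def refreshDialog():
#         for r in resultsList.getResults():
#             print(r.name, r.filename)
#
#     resultsList.resultAdded.connect(refreshDialog)
#     resultsList.addResult(icon, 'Buffer output', timestamp, '/tmp/buffer.shp')
#
# icon, timestamp and the output path above are made-up placeholders for a real
# QIcon, timestamp string and result file.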
| gpl-2.0 |
engdan77/edoAutoHomeMobile | twisted/protocols/htb.py | 51 | 9330 | # -*- test-case-name: twisted.test.test_htb -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Hierarchical Token Bucket traffic shaping.
Patterned after U{Martin Devera's Hierarchical Token Bucket traffic
shaper for the Linux kernel<http://luxik.cdi.cz/~devik/qos/htb/>}.
@seealso: U{HTB Linux queuing discipline manual - user guide
<http://luxik.cdi.cz/~devik/qos/htb/manual/userg.htm>}
@seealso: U{Token Bucket Filter in Linux Advanced Routing & Traffic Control
HOWTO<http://lartc.org/howto/lartc.qdisc.classless.html#AEN682>}
"""
# TODO: Investigate whether we should be using os.times()[-1] instead of
# time.time. time.time, it has been pointed out, can go backwards. Is
# the same true of os.times?
from time import time
from zope.interface import implements, Interface
from twisted.protocols import pcp
class Bucket:
"""
Implementation of a Token bucket.
A bucket can hold a certain number of tokens and it drains over time.
@cvar maxburst: The maximum number of tokens that the bucket can
hold at any given time. If this is C{None}, the bucket has
an infinite size.
@type maxburst: C{int}
@cvar rate: The rate at which the bucket drains, in number
of tokens per second. If the rate is C{None}, the bucket
drains instantaneously.
@type rate: C{int}
"""
maxburst = None
rate = None
_refcount = 0
def __init__(self, parentBucket=None):
"""
Create a L{Bucket} that may have a parent L{Bucket}.
@param parentBucket: If a parent Bucket is specified,
all L{add} and L{drip} operations on this L{Bucket}
will be applied on the parent L{Bucket} as well.
@type parentBucket: L{Bucket}
"""
self.content = 0
self.parentBucket = parentBucket
self.lastDrip = time()
def add(self, amount):
"""
Adds tokens to the L{Bucket} and its C{parentBucket}.
This will add as many of the C{amount} tokens as will fit into both
this L{Bucket} and its C{parentBucket}.
@param amount: The number of tokens to try to add.
@type amount: C{int}
@returns: The number of tokens that actually fit.
@returntype: C{int}
"""
self.drip()
if self.maxburst is None:
allowable = amount
else:
allowable = min(amount, self.maxburst - self.content)
if self.parentBucket is not None:
allowable = self.parentBucket.add(allowable)
self.content += allowable
return allowable
def drip(self):
"""
Let some of the bucket drain.
The L{Bucket} drains at the rate specified by the class
variable C{rate}.
@returns: C{True} if the bucket is empty after this drip.
@returntype: C{bool}
"""
if self.parentBucket is not None:
self.parentBucket.drip()
if self.rate is None:
self.content = 0
else:
now = time()
deltaTime = now - self.lastDrip
deltaTokens = deltaTime * self.rate
self.content = max(0, self.content - deltaTokens)
self.lastDrip = now
return self.content == 0
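# Illustrative usage sketch (hypothetical, not part of the original module):
# a concrete bucket is normally made by overriding the class variables, e.g.
#
#     class KiloTokenBucket(Bucket):
#         maxburst = 5000   # hold at most 5000 tokens
#         rate = 1000       # drain 1000 tokens per second
#
#     b = KiloTokenBucket()
#     accepted = b.add(1500)   # returns how many of the 1500 tokens fit
#
# KiloTokenBucket and the numbers are made-up placeholders; only Bucket itself
# is defined by this module.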
class IBucketFilter(Interface):
def getBucketFor(*somethings, **some_kw):
"""
Return a L{Bucket} corresponding to the provided parameters.
@returntype: L{Bucket}
"""
class HierarchicalBucketFilter:
"""
Filter things into buckets that can be nested.
@cvar bucketFactory: Class of buckets to make.
@type bucketFactory: L{Bucket}
@cvar sweepInterval: Seconds between sweeping out the bucket cache.
@type sweepInterval: C{int}
"""
implements(IBucketFilter)
bucketFactory = Bucket
sweepInterval = None
def __init__(self, parentFilter=None):
self.buckets = {}
self.parentFilter = parentFilter
self.lastSweep = time()
def getBucketFor(self, *a, **kw):
"""
Find or create a L{Bucket} corresponding to the provided parameters.
Any parameters are passed on to L{getBucketKey}, from them it
decides which bucket you get.
@returntype: L{Bucket}
"""
if ((self.sweepInterval is not None)
and ((time() - self.lastSweep) > self.sweepInterval)):
self.sweep()
if self.parentFilter:
parentBucket = self.parentFilter.getBucketFor(self, *a, **kw)
else:
parentBucket = None
key = self.getBucketKey(*a, **kw)
bucket = self.buckets.get(key)
if bucket is None:
bucket = self.bucketFactory(parentBucket)
self.buckets[key] = bucket
return bucket
def getBucketKey(self, *a, **kw):
"""
Construct a key based on the input parameters to choose a L{Bucket}.
The default implementation returns the same key for all
arguments. Override this method to provide L{Bucket} selection.
@returns: Something to be used as a key in the bucket cache.
"""
return None
def sweep(self):
"""
Remove empty buckets.
"""
for key, bucket in self.buckets.items():
bucket_is_empty = bucket.drip()
if (bucket._refcount == 0) and bucket_is_empty:
del self.buckets[key]
self.lastSweep = time()
class FilterByHost(HierarchicalBucketFilter):
"""
A Hierarchical Bucket filter with a L{Bucket} for each host.
"""
sweepInterval = 60 * 20
def getBucketKey(self, transport):
return transport.getPeer()[1]
class FilterByServer(HierarchicalBucketFilter):
"""
A Hierarchical Bucket filter with a L{Bucket} for each service.
"""
sweepInterval = None
def getBucketKey(self, transport):
return transport.getHost()[2]
class ShapedConsumer(pcp.ProducerConsumerProxy):
"""
Wraps a C{Consumer} and shapes the rate at which it receives data.
"""
# Providing a Pull interface means I don't have to try to schedule
# traffic with callLaters.
iAmStreaming = False
def __init__(self, consumer, bucket):
pcp.ProducerConsumerProxy.__init__(self, consumer)
self.bucket = bucket
self.bucket._refcount += 1
def _writeSomeData(self, data):
# In practice, this actually results in obscene amounts of
# overhead, as a result of generating lots and lots of packets
# with twelve-byte payloads. We may need to do a version of
# this with scheduled writes after all.
amount = self.bucket.add(len(data))
return pcp.ProducerConsumerProxy._writeSomeData(self, data[:amount])
def stopProducing(self):
pcp.ProducerConsumerProxy.stopProducing(self)
self.bucket._refcount -= 1
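# Illustrative wiring sketch (hypothetical, not part of the original module):
# a ShapedConsumer is typically paired with a bucket handed out by one of the
# filters above, e.g.
#
#     hostFilter = FilterByHost()
#     shaped = ShapedConsumer(someConsumer, hostFilter.getBucketFor(someTransport))
#
# someConsumer and someTransport stand in for a real IConsumer and transport.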
class ShapedTransport(ShapedConsumer):
"""
Wraps a C{Transport} and shapes the rate at which it receives data.
This is a L{ShapedConsumer} with a little bit of magic to provide for
the case where the consumer it wraps is also a C{Transport} and people
will be attempting to access attributes this does not proxy as a
C{Consumer} (e.g. C{loseConnection}).
"""
# Ugh. We only wanted to filter IConsumer, not ITransport.
iAmStreaming = False
def __getattr__(self, name):
# Because people will be doing things like .getPeer and
# .loseConnection on me.
return getattr(self.consumer, name)
class ShapedProtocolFactory:
"""
Dispense C{Protocols} with traffic shaping on their transports.
Usage::
myserver = SomeFactory()
myserver.protocol = ShapedProtocolFactory(myserver.protocol,
bucketFilter)
Where C{SomeServerFactory} is a L{twisted.internet.protocol.Factory}, and
C{bucketFilter} is an instance of L{HierarchicalBucketFilter}.
"""
def __init__(self, protoClass, bucketFilter):
"""
Tell me what to wrap and where to get buckets.
@param protoClass: The class of C{Protocol} this will generate
wrapped instances of.
@type protoClass: L{Protocol<twisted.internet.interfaces.IProtocol>}
class
@param bucketFilter: The filter which will determine how
traffic is shaped.
@type bucketFilter: L{HierarchicalBucketFilter}.
"""
# More precisely, protoClass can be any callable that will return
# instances of something that implements IProtocol.
self.protocol = protoClass
self.bucketFilter = bucketFilter
def __call__(self, *a, **kw):
"""
Make a C{Protocol} instance with a shaped transport.
Any parameters will be passed on to the protocol's initializer.
@returns: A C{Protocol} instance with a L{ShapedTransport}.
"""
proto = self.protocol(*a, **kw)
origMakeConnection = proto.makeConnection
def makeConnection(transport):
bucket = self.bucketFilter.getBucketFor(transport)
shapedTransport = ShapedTransport(transport, bucket)
return origMakeConnection(shapedTransport)
proto.makeConnection = makeConnection
return proto
| mit |
acsone/Arelle | arelle/plugin/xbrlDB/XbrlSemanticJsonDB.py | 2 | 49520 | '''
XbrlSemanticJsonDB.py implements an JSON database interface for Arelle, based
on a concrete realization of the Abstract Model PWD 2.0 layer. This is a semantic
representation of XBRL information.
This module may save directly to a JSON Server (TBD) or to append to a file of JSON.
This module provides the execution context for saving a dts and instances in
XBRL JSON graph. It may be loaded by Arelle's RSS feed, or by individual
DTS and instances opened by interactive or command line/web service mode.
Example dialog or command line parameters for operation:
host: the supporting host for JSON Server or "jsonFile" to append to a JSON file
port: the host port (80 is default) if a JSON Server
user, password: if needed for server
database: the top level path segment for the JSON Server or disk file path if jsonFile
timeout:
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
to do:
1) add AMTF cube regions (dimensions)
HF - don't believe this is either feasible or has a use case in a graph model
2) check existence of (shared) documents and contained elements before adding
3) tuple structure declaration (particles in elements of data dictionary?)
4) tuple structure (instance facts)
5) add footnote resources to relationships (and test with EDInet footnote references)
6) test some filings with text blocks (shred them?) (30mB - 50mB sized text blocks?)
7) add mappings to, or any missing relationships, of Charlie's financial model
'''
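# Illustrative usage sketch (hypothetical values, not part of the original source):
# the module is normally driven through insertIntoDB() below; the special host
# name "jsonFile" (JSONFILE_HOSTNAME) appends the semantic graph to a file on
# disk instead of posting to a JSON server, e.g.
#
#     insertIntoDB(modelXbrl, host="jsonFile", database="/tmp/report.json")
#
# The output path is a placeholder; user/password/port apply only to a real server.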
import os, io, time, json, socket, logging, zlib, datetime
from arelle.ModelDtsObject import ModelConcept, ModelResource, ModelRelationship
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelDocument import Type
from arelle import XbrlConst, XmlUtil, UrlUtil
import urllib.request
from urllib.error import HTTPError, URLError
from lxml import etree
from decimal import Decimal
import datetime
TRACEJSONFILE = None
#TRACEJSONFILE = r"c:\temp\jsonDBtrace.log" # uncomment to trace JSON on connection (very big file!!!)
JSONFILE_HOSTNAME = "jsonFile"
def insertIntoDB(modelXbrl,
user=None, password=None, host=None, port=None, database=None, timeout=None,
product=None, rssItem=None, **kwargs):
jsondb = None
try:
jsondb = XbrlSemanticJsonDatabaseConnection(modelXbrl, user, password, host, port, database, timeout)
jsondb.insertXbrl(rssItem=rssItem)
jsondb.close()
except Exception as ex:
if jsondb is not None:
try:
jsondb.close(rollback=True)
except Exception as ex2:
pass
raise # reraise original exception with original traceback
def isDBPort(host, port, db, timeout=10):
if host == JSONFILE_HOSTNAME:
return True
# determine if postgres port
t = 2
while t < timeout:
try:
conn = urllib.request.urlopen("http://{0}:{1}/{2}/status".format(host, port or '80', db))
return True # success but doesn't need password
except HTTPError:
return False # success, this is really a postgres socket, wants user name
except URLError:
return False # something is there but not postgres
except socket.timeout:
t = t + 2 # relax - try again with longer timeout
return False
# top level JSON Graph object keynames
FILINGS = "filings"
DOCUMENTS = "documents"
def modelObjectDocumentUri(modelObject):
return UrlUtil.ensureUrl(modelObject.modelDocument.uri)
def modelObjectUri(modelObject):
return '#'.join((modelObjectDocumentUri(modelObject),
XmlUtil.elementFragmentIdentifier(modelObject)))
def qnameUri(qname, sep='#'):
return sep.join((qname.namespaceURI, qname.localName))
def qnamePrefix_Name(qname, sep=':'):
    # substitute standard prefixes for commonly-defaulted xmlns namespaces
prefix = {XbrlConst.xsd: 'xsd',
XbrlConst.xml: 'xml',
XbrlConst.xbrli: 'xbrli',
XbrlConst.link: 'link',
XbrlConst.gen: 'gen',
XbrlConst.xlink: 'xlink'
}.get(qname.namespaceURI, qname.prefix)
return sep.join((prefix, qname.localName))
def modelObjectQnameUri(modelObject, sep='#'):
return qnameUri(modelObject.qname, sep)
def modelObjectNameUri(modelObject, sep='#'):
return '#'.join((modelObjectDocumentUri(modelObject),
modelObject.name)) # for schema definitions with name attribute
class XJDBException(Exception):
def __init__(self, code, message, **kwargs ):
self.code = code
self.message = message
self.kwargs = kwargs
self.args = ( self.__repr__(), )
def __repr__(self):
return _('[{0}] exception: {1}').format(self.code, self.message % self.kwargs)
def jsonDefaultEncoder(obj):
if isinstance(obj, Decimal):
return float(obj)
elif isinstance(obj, (datetime.date, datetime.datetime)):
return XmlUtil.dateunionValue(obj)
raise TypeError("Type {} is not supported for json output".format(type(obj).__name__))
class XbrlSemanticJsonDatabaseConnection():
def __init__(self, modelXbrl, user, password, host, port, database, timeout):
self.modelXbrl = modelXbrl
self.disclosureSystem = modelXbrl.modelManager.disclosureSystem
#self.conn = RexProConnection(host, int(port or '8182'), (database or 'emptygraph'),
# user=user, password=password)
self.isJsonFile = host == JSONFILE_HOSTNAME
if self.isJsonFile:
self.jsonFile = database
else:
connectionUrl = "http://{0}:{1}".format(host, port or '80')
self.url = connectionUrl + '/' + database
# Create an OpenerDirector with support for Basic HTTP Authentication...
auth_handler = urllib.request.HTTPBasicAuthHandler()
if user:
auth_handler.add_password(realm=None,
uri=connectionUrl,
user=user,
passwd=password)
self.conn = urllib.request.build_opener(auth_handler)
self.timeout = timeout or 60
self.verticePropTypes = {}
def close(self, rollback=False):
try:
if not self.isJsonFile:
self.conn.close()
self.__dict__.clear() # dereference everything
except Exception as ex:
self.__dict__.clear() # dereference everything
raise
@property
def isClosed(self):
return not bool(self.__dict__) # closed when dict is empty
def showStatus(self, msg, clearAfter=None):
self.modelXbrl.modelManager.showStatus(msg, clearAfter)
def execute(self, activity, graph=None, query=None):
if graph is not None:
headers = {'User-agent': 'Arelle/1.0',
'Accept': 'application/json',
'Content-Type': "text/json; charset='UTF-8'"}
data = _STR_UNICODE(json.dumps(graph,
sort_keys=True, # allow comparability of json files
ensure_ascii=False,
indent=2,
default=jsonDefaultEncoder)) # might not be unicode in 2.7
elif query is not None:
headers = {'User-agent': 'Arelle/1.0',
'Accept': 'application/json'}
data = ("query=" + query)
else:
return None
# turtle may be mixture of line strings and strings with \n-separated lines
if TRACEJSONFILE:
with io.open(TRACEJSONFILE, 'at', encoding='utf-8') as fh:
fh.write("\n\n>>> sent: \n")
fh.write(data)
if self.isJsonFile and data is not None:
with io.open(self.jsonFile, 'at', encoding='utf-8') as fh:
fh.write(data)
return None
if graph is not None or query is not None:
url = self.url + "/json"
request = urllib.request.Request(url,
data=data.encode('utf-8'),
headers=headers)
try:
with self.conn.open(request, timeout=self.timeout) as fp:
results = fp.read().decode('utf-8')
try:
results = json.loads(results)
except ValueError:
pass # leave results as string
except HTTPError as err:
results = err.fp.read().decode('utf-8')
if TRACEJSONFILE:
with io.open(TRACEJSONFILE, "a", encoding='utf-8') as fh:
fh.write("\n\n>>> received: \n{0}".format(str(results)))
if isinstance(results, str) and query is not None:
parser = etree.HTMLParser()
htmlDoc = etree.parse(io.StringIO(results), parser)
body = htmlDoc.find("//body")
if body is not None:
error = "".join(text for text in body.itertext())
else:
error = results
raise XJDBException("jsonDB:DatabaseError",
_("%(activity)s not successful: %(error)s"),
activity=activity, error=error)
return results
def commit(self, graph):
self.execute("Saving RDF Graph", graph=graph)
def loadGraphRootVertices(self):
self.showStatus("Load/Create graph root vertices")
pass
def getDBsize(self):
self.showStatus("Get database size")
return 0
def insertXbrl(self, rssItem):
try:
# must also have default dimensions loaded
from arelle import ValidateXbrlDimensions
ValidateXbrlDimensions.loadDimensionDefaults(self.modelXbrl)
#initialVcount, initialEcount = self.getDBsize() # don't include in timing, very slow
startedAt = time.time()
# find pre-existing documents in server database
self.identifyPreexistingDocuments()
g = {FILINGS:{},
DOCUMENTS:{}}
self.insertSchema(g)
# self.load() this done in the verify step
self.insertFiling(rssItem,g)
self.insertDocuments(g)
self.insertDataDictionary() # XML namespaces types aspects
#self.insertRelationshipTypeSets()
#self.insertResourceRoleSets()
#self.insertAspectValues()
self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: DTS insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertDataPoints()
self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: data points insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertRelationshipSets()
self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: Relationships insertion"), time.time() - startedAt)
self.insertValidationResults()
self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: Validation results insertion"), time.time() - startedAt)
#startedAt = time.time()
#self.insertValidCombinations()
#self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: Valid Combinations insertion"), time.time() - startedAt)
self.showStatus("Committing entries")
self.commit(g)
self.modelXbrl.profileStat(_("XbrlSemanticJsonDB: insertion committed"), time.time() - startedAt)
#finalVcount, finalEcount = self.getDBsize()
#self.modelXbrl.modelManager.addToLog("added vertices: {0}, edges: {1}, total vertices: {2}, edges: {3}".format(
# finalVcount - initialVcount, finalEcount - initialEcount, finalVcount, finalEcount))
self.showStatus("DB insertion completed", clearAfter=5000)
except Exception as ex:
self.showStatus("DB insertion failed due to exception", clearAfter=5000)
raise
def insertSchema(self, g):
if True: # if schema not defined
self.showStatus("insert schema")
# Filings schema
# Aspect schema
# Relationships schema
# DataPoints schema
def insertFiling(self, rssItem, g):
self.showStatus("insert filing")
# accession graph -> document vertices
new_filing = {'documents': []}
if self.modelXbrl.modelDocument.creationSoftwareComment:
new_filing['creation_software'] = self.modelXbrl.modelDocument.creationSoftwareComment
datetimeNow = datetime.datetime.now()
datetimeNowStr = XmlUtil.dateunionValue(datetimeNow)
entryUri = modelObjectDocumentUri(self.modelXbrl)
if rssItem is not None: # sec accession
# set self.
new_filing['filingType'] = "SEC filing"
# for an RSS Feed entry from SEC, use rss item's accession information
new_filing['filingNumber'] = filingNumber = rssItem.accessionNumber
new_filing['acceptedTimestamp'] = XmlUtil.dateunionValue(rssItem.acceptanceDatetime)
new_filing['filingDate'] = XmlUtil.dateunionValue(rssItem.filingDate)
new_filing['entityId'] = rssItem.cikNumber
new_filing['entityName'] = rssItem.companyName
new_filing['SICCode'] = rssItem.assignedSic
new_filing['SECHtmlUrl'] = rssItem.htmlUrl
new_filing['entryUrl'] = rssItem.url
self.filingURI = rssItem.htmlUrl
else:
# not an RSS Feed item, make up our own accession ID (the time in seconds of epoch)
intNow = int(time.time())
new_filing['filingNumber'] = filingNumber = str(intNow)
self.filingId = int(time.time()) # only available if entered from an SEC filing
new_filing['filingType'] = "independent filing"
new_filing['acceptedTimestamp'] = datetimeNowStr
new_filing['filingDate'] = datetimeNowStr
new_filing['entryUrl'] = UrlUtil.ensureUrl(self.modelXbrl.fileSource.url)
self.filingURI = filingNumber
g[FILINGS][self.filingURI] = new_filing
self.filing = new_filing
# for now only one report per filing (but SEC may have multiple in future, such as form SD)
self.reportURI = modelObjectDocumentUri(self.modelXbrl)
self.report = {'filing': self.filingURI,
'aspectProxies': {},
'relationshipSets': {},
'dataPoints': {},
'messages': {}}
new_filing['reports'] = {self.reportURI: self.report}
# relationshipSets are a dts property
self.relationshipSets = [(arcrole, ELR, linkqname, arcqname)
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR and (arcrole.startswith("XBRL-") or (linkqname and arcqname))]
def identifyPreexistingDocuments(self):
self.existingDocumentUris = set()
if not self.isJsonFile:
docFilters = []
for modelDocument in self.modelXbrl.urlDocs.values():
if modelDocument.type == Type.SCHEMA:
docFilters.append('STR(?doc) = "{}"'.format(UrlUtil.ensureUrl(modelDocument.uri)))
results = self.execute(
# TBD: fix up for Mongo DB query
"select",
query="""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX DTS: <http://xbrl.org/2013/rdf/DTS/>
SELECT distinct ?doc WHERE { ?doc rdf:type DTS:Document
FILTER( """ + '\n|| '.join(docFilters) + ") .}")
try:
for result in results['results']['bindings']:
doc = result['doc']
if doc.get('type') == 'uri':
self.existingDocumentUris.add(doc['value'])
except KeyError:
pass # no existingDocumentUris
def insertDocuments(self,g):
# accession->documents
#
self.showStatus("insert documents")
documents = self.documents = g[DOCUMENTS]
for modelDocument in self.modelXbrl.urlDocs.values():
docUri = modelObjectDocumentUri(modelDocument)
if docUri not in self.existingDocumentUris:
documents[docUri] = {
'url': docUri,
'documentType': Type.typeName[modelDocument.type],
'references': [modelObjectDocumentUri(doc)
for doc, ref in modelDocument.referencesDocument.items()
if doc.inDTS and ref.referenceTypes & {"href", "import", "include"}],
'resources': {}
}
self.filing['documents'].append(docUri)
if modelDocument.uri == self.modelXbrl.modelDocument.uri: # entry document
self.report['entryPoint'] = docUri
def conceptsUsed(self):
conceptsUsed = set(f.qname for f in self.modelXbrl.factsInInstance)
for cntx in self.modelXbrl.contexts.values():
for dim in cntx.qnameDims.values():
conceptsUsed.add(dim.dimensionQname)
if dim.isExplicit:
conceptsUsed.add(dim.memberQname)
else:
conceptsUsed.add(dim.typedMember.qname)
for defaultDim, defaultDimMember in self.modelXbrl.qnameDimensionDefaults.items():
conceptsUsed.add(defaultDim)
conceptsUsed.add(defaultDimMember)
for roleTypes in (self.modelXbrl.roleTypes, self.modelXbrl.arcroleTypes):
for modelRoleTypes in roleTypes.values():
for modelRoleType in modelRoleTypes:
for qn in modelRoleType.usedOns:
conceptsUsed.add(qn)
for relationshipSetKey in self.relationshipSets:
relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
for rel in relationshipSet.modelRelationships:
if isinstance(rel.fromModelObject, ModelConcept):
conceptsUsed.add(rel.fromModelObject)
if isinstance(rel.toModelObject, ModelConcept):
conceptsUsed.add(rel.toModelObject)
for qn in (XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit):
conceptsUsed.add(qn)
conceptsUsed -= {None} # remove None if in conceptsUsed
return conceptsUsed
def insertDataDictionary(self):
# separate graph
# document-> dataTypeSet -> dataType
# do all schema dataTypeSet vertices
self.type_id = {}
self.aspect_id = {}
self.aspect_proxy = {}
self.aspect_proxy_uri = {}
self.roleType_id = {}
self.arcroleType_id = {}
'''
if any((not self.document_isNew[modelDocument.uri])
for modelDocument in self.modelXbrl.urlDocs.values()):
conceptsUsed = self.conceptsUsed()
'''
conceptsUsed = self.conceptsUsed()
for modelDocument in self.modelXbrl.urlDocs.values():
self.showStatus("insert DataDictionary " + modelDocument.basename)
docUri = modelObjectDocumentUri(modelDocument)
document = self.documents[docUri]
# don't re-output existing documents
if modelDocument.type == Type.SCHEMA:
isNewDocument = True # self.document_isNew[modelDocument.uri]
modelConcepts = [modelConcept
for modelConcept in self.modelXbrl.qnameConcepts.values()
if modelConcept.modelDocument is modelDocument and
(isNewDocument or modelConcept in conceptsUsed)]
if docUri not in self.existingDocumentUris:
# adding document as new
document['dataTypes'] = dataTypes = {}
for modelType in self.modelXbrl.qnameTypes.values():
if modelType.modelDocument is modelDocument:
dataTypes[modelType.name] = dataType = {
'dataType': modelObjectNameUri(modelType),
'document': modelObjectDocumentUri(modelType),
'url': modelObjectUri(modelType),
'namespaceURI': modelType.qname.namespaceURI,
'localName': modelType.name,
}
xbrliBaseType = modelType.baseXbrliTypeQname
if not isinstance(xbrliBaseType, (tuple,list)):
xbrliBaseType = (xbrliBaseType,)
for baseType in xbrliBaseType:
if baseType is not None:
dataType['baseType'] = qnameUri(baseType)
if baseType.namespaceURI == "http://www.w3.org/2001/XMLSchema":
dataType['baseXsdType'] = qnameUri(baseType)
typeDerivedFrom = modelType.typeDerivedFrom
if not isinstance(typeDerivedFrom, (tuple,list)): # list if a union
typeDerivedFrom = (typeDerivedFrom,)
for dt in typeDerivedFrom:
if dt is not None:
dataType['derivedFrom'] = modelObjectNameUri(dt)
for prop in ('isTextBlock', 'isDomainItemType'):
propertyValue = getattr(modelType, prop, None)
if propertyValue:
dataType[prop] = propertyValue
document['aspects'] = aspects = {}
for modelConcept in modelConcepts:
aspects[modelConcept.name] = aspect = {
'document': modelObjectDocumentUri(modelConcept),
'url': modelObjectUri(modelConcept),
'namespaceURI': modelConcept.qname.namespaceURI,
'localName': modelConcept.name,
'isAbstract': modelConcept.isAbstract
}
if modelConcept.periodType:
aspect['periodType'] = modelConcept.periodType
if modelConcept.balance:
aspect['balance'] = modelConcept.balance
for prop in ('isItem', 'isTuple', 'isLinkPart',
'isNumeric', 'isMonetary', 'isExplicitDimension',
'isDimensionItem', 'isPrimaryItem',
'isTypedDimension', 'isDomainMember', 'isHypercubeItem',
'isShares', 'isTextBlock', 'isNillable'):
propertyValue = getattr(modelConcept, prop, None)
if propertyValue:
aspect[prop] = propertyValue
conceptType = modelConcept.type
if conceptType is not None:
aspect['dataType'] = modelObjectNameUri(conceptType)
substitutionGroup = modelConcept.substitutionGroup
if substitutionGroup is not None:
aspect['substitutionGroup'] = modelObjectNameUri(substitutionGroup)
document['roleTypes'] = roleTypes = {}
for modelRoleTypes in self.modelXbrl.roleTypes.values():
for modelRoleType in modelRoleTypes:
roleTypes[modelRoleType.roleURI] = roleType = {
'document': modelObjectDocumentUri(modelRoleType),
'url': modelObjectUri(modelRoleType),
'roleURI': modelRoleType.roleURI,
'definition': modelRoleType.definition,
'usedOn': [modelObjectUri(self.modelXbrl.qnameConcepts[qn])
for qn in modelRoleType.usedOns]
}
document['arcroleTypes'] = arcroleTypes = {}
for modelArcroleTypes in self.modelXbrl.arcroleTypes.values():
for modelArcroleType in modelArcroleTypes:
                            arcroleTypes[modelArcroleType.arcroleURI] = arcroleType = {
'document': modelObjectDocumentUri(modelArcroleType),
'url': modelObjectUri(modelArcroleType),
                                'arcroleURI': modelArcroleType.arcroleURI,
'definition': modelArcroleType.definition,
'usedOn': [modelObjectUri(self.modelXbrl.qnameConcepts[qn])
for qn in modelArcroleType.usedOns],
'cyclesAllowed': modelArcroleType.cyclesAllowed
}
activity = "Insert data dictionary types, aspects, roles, and arcroles for " + modelDocument.uri
'''
def insertValidCombinations(self):
# document-> validCombinationsSet-> cubes
self.showStatus("insert ValidCombinations")
drsELRs = set(ELR
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.values()
if arcrole == XbrlConst.all)
hasHcRels = self.modelXbrl.relationshipSet(XbrlConst.all).modelRelationships
hcConcepts = set(hasHcRel.toModelObject for hasHcRel in hasHcRels)
# one cube region per head pri item with multiple cube regions
for hcConcept in hcConcepts:
# include any other concepts in this pri item with clean inheritance
for drsELR in drsELRs:
# each ELR is another cube region
for allRel in val.modelXbrl.relationshipSet(XbrlConst.all, ELR)
drsPriItems(val, fromELR, fromPriItem
            ... this becomes an unwieldy large model, don't see a use case for compiling it out
'''
def insertAspectProxies(self, qnames):
aspectQnames = [qname
for qname in qnames
if qname not in self.aspect_proxy_uri and qname in self.modelXbrl.qnameConcepts]
for qname in aspectQnames:
self.insertAspectProxy(qname, qnamePrefix_Name(qname))
def insertAspectProxy(self, aspectQName, aspectProxyUri):
concept = self.modelXbrl.qnameConcepts[aspectQName]
self.report['aspectProxies'][aspectProxyUri] = aspectProxy = {
'report': self.reportURI,
'document': modelObjectDocumentUri(concept),
'name': concept.name
}
self.aspect_proxy[aspectQName] = aspectProxy
self.aspect_proxy_uri[aspectQName] = aspectProxyUri
return aspectProxy
def aspectQnameProxy(self, qname):
if hasattr(qname, "modelDocument"):
return self.aspect_proxy.get(qname.qname)
elif qname in self.modelXbrl.qnameConcepts:
return self.aspect_proxy.get(qname)
return None
def aspectQnameProxyId(self, qname):
if hasattr(qname, "modelDocument"):
return self.aspect_proxy_uri.get(qname.qname)
elif qname in self.modelXbrl.qnameConcepts:
return self.aspect_proxy_uri.get(qname)
return None
def insertDataPoints(self):
# separate graph
# document-> dataTypeSet -> dataType
self.showStatus("insert DataPoints")
# note these initial aspects Qnames used also must be in conceptsUsed above
dimensions = [] # index by hash of dimension
dimensionIds = {} # index for dimension
if self.modelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL):
contextAspectValueSelections = {} # contexts processed already
unitIDs = set() # units processed already
periodProxies = {}
entityIdentifierAspectProxies = {}
dataPoints = self.report['dataPoints']
for fact in self.modelXbrl.factsInInstance:
self.insertAspectProxies( (fact.qname,) )
factId = XmlUtil.elementFragmentIdentifier(fact)
dataPoints[factId] = dataPoint = {
'document': modelObjectDocumentUri(fact),
'id': factId,
'sourceLine': fact.sourceline,
'dataPointUrl': modelObjectUri(fact),
'baseItem': self.aspectQnameProxyId(fact.qname)
}
context = fact.context
concept = fact.concept
if context is not None:
if context.entityIdentifier not in entityIdentifierAspectProxies:
entityScheme, entityIdentifier = context.entityIdentifier
entityIdentifierAspectProxy = "{}/{}".format(
qnamePrefix_Name(XbrlConst.qnXbrliIdentifier),
entityIdentifier)
e = self.insertAspectProxy(XbrlConst.qnXbrliIdentifier, entityIdentifierAspectProxy)
e['scheme'] = entityScheme
e['identifier'] = entityIdentifier
entityIdentifierAspectProxies[context.entityIdentifier] = entityIdentifierAspectProxy
else:
entityIdentifierAspectProxy = entityIdentifierAspectProxies[context.entityIdentifier]
dataPoint['entityIdentifier'] = entityIdentifierAspectProxy
if context.isForeverPeriod:
period = "forever"
if context.isInstantPeriod:
endDate = XmlUtil.dateunionValue(context.instantDatetime, subtractOneDay=True).replace(':','_')
period = "instant/{}".format(endDate)
else:
startDate = XmlUtil.dateunionValue(context.startDatetime).replace(':','_')
endDate = XmlUtil.dateunionValue(context.endDatetime, subtractOneDay=True).replace(':','_')
period = "duration/{}/{}".format(startDate, endDate)
if period not in periodProxies:
periodProxy = "{}/{}".format(
qnamePrefix_Name(XbrlConst.qnXbrliPeriod),
period)
p = self.insertAspectProxy(XbrlConst.qnXbrliPeriod, periodProxy)
p['isForever'] = context.isForeverPeriod
p['isInstant'] = context.isInstantPeriod
if context.isStartEndPeriod:
d = context.startDatetime
if d.hour == 0 and d.minute == 0 and d.second == 0:
d = d.date()
p['startDate'] = d
if context.isStartEndPeriod or context.isInstantPeriod:
d = context.endDatetime
if d.hour == 0 and d.minute == 0 and d.second == 0:
d = (d - datetime.timedelta(1)).date()
p['endDate'] = d
periodProxies[period] = periodProxy
else:
periodProxy = periodProxies[period]
dataPoint['period'] = periodProxy
dataPoint['contextUrl'] = modelObjectUri(context)
dataPoint['contextId'] = context.id
if context.id not in contextAspectValueSelections:
contextAspectValueSelections[context.id] = contextAspectValueSelection = []
for dimVal in context.qnameDims.values():
dim = qnamePrefix_Name(dimVal.dimensionQname)
if dimVal.isExplicit:
                                self.insertAspectProxies( (dimVal.memberQname,) ) # need immediate use of proxy
                                v = self.aspectQnameProxyId(dimVal.memberQname)
else:
v = dimVal.typedMember.stringValue
dimProxy = "{}/{}".format(dim, v)
d = self.insertAspectProxy(dimVal.dimensionQname, dimProxy)
contextAspectValueSelection.append(dimProxy)
d['aspect'] = dim
if dimVal.isExplicit:
d['aspectValue'] = v
else:
d['typedValue'] = v
else:
contextAspectValueSelection = contextAspectValueSelections[context.id]
dataPoint['aspectValueSelections'] = contextAspectValueSelection
if fact.isNumeric:
if fact.precision == "INF":
dataPoint['precision'] = "INF"
elif fact.precision is not None:
dataPoint['precision'] = fact.precision
if fact.decimals == "INF":
dataPoint['decimals'] = "INF"
elif fact.decimals is not None:
dataPoint['decimals'] = fact.decimals
if fact.unit is not None:
unit = fact.unit
unitProxy = "{}/{}".format(
qnamePrefix_Name(XbrlConst.qnXbrliUnit),
unit.id)
dataPoint['unit'] = unitProxy
if unit.id not in unitIDs:
unitIDs.add(unit.id)
u = self.insertAspectProxy(XbrlConst.qnXbrliUnit, unitProxy)
u['unitId'] = unit.id
mults, divs = unit.measures
u['multiplyMeasures'] = [qnameUri(qn) for qn in mults]
if divs:
u['divideMeasures'] = [qnameUri(qn) for qn in divs]
if fact.xmlLang is None and fact.concept is not None and fact.concept.baseXsdType is not None:
dataPoint['value'] = fact.xValue
# The insert with base XSD type but no language
elif fact.xmlLang is not None:
# assuming string type with language
dataPoint['language'] = fact.xmlLang
dataPoint['value'] = fact.value
else:
                # Otherwise insert as plain literal with no language or datatype
dataPoint['value'] = fact.value
if fact.modelTupleFacts:
dataPoint['tuple'] = [XmlUtil.elementFragmentIdentifier(tupleFact)
for tupleFact in fact.modelTupleFacts]
def resourceId(self,i):
return "<{accessionPrefix}resource/{i}>".format(accessionPrefix=self.thisAccessionPrefix,
i=i)
def insertRelationshipSets(self):
self.showStatus("insert relationship sets")
aspectQnamesUsed = set()
for i, relationshipSetKey in enumerate(self.relationshipSets):
arcrole, linkrole, linkqname, arcqname = relationshipSetKey
if linkqname:
aspectQnamesUsed.add(linkqname)
if arcqname:
aspectQnamesUsed.add(arcqname)
self.insertAspectProxies(aspectQnamesUsed)
relationshipSets = self.report['relationshipSets']
relSetIds = {}
for i, relationshipSetKey in enumerate(self.relationshipSets):
arcrole, linkrole, linkqname, arcqname = relationshipSetKey
if arcrole not in ("XBRL-formulae", "Table-rendering", "XBRL-footnotes") and linkrole and linkqname and arcqname:
# skip paths and qnames for now (add later for non-SEC)
relSetId = "{}/{}".format(
os.path.basename(arcrole),
os.path.basename(linkrole))
relSetIds[relationshipSetKey] = relSetId
relationshipSets[relSetId] = relationshipSet = {
'arcrole': arcrole,
'linkrole': linkrole,
'arcname': self.aspectQnameProxyId(arcqname),
'linkname': self.aspectQnameProxyId(linkqname),
'report': self.reportURI,
'roots': [],
'relationships': []
}
# do tree walk to build relationships with depth annotated, no targetRole navigation
relE = [] # fromV, toV, label
resources = set()
aspectQnamesUsed = set()
resourceIDs = {} # index by object
def walkTree(rels, parentRelId, seq, depth, relationshipSetKey, relationshipSet, visited, relSetId, doVertices):
for rel in rels:
if rel not in visited:
visited.add(rel)
if not doVertices:
_relProp = {'seq': seq,
'depth': depth,
'order': rel.orderDecimal,
'priority': rel.priority,
'relSetId': relSetId
}
if isinstance(rel.fromModelObject, ModelConcept):
if doVertices:
aspectQnamesUsed.add(rel.fromModelObject.qname)
sourceUri = True
else:
sourceQname = rel.fromModelObject.qname
sourceUri = self.aspectQnameProxyId(sourceQname)
sourceId = qnamePrefix_Name(rel.fromModelObject.qname)
else:
sourceUri = None # tbd
toModelObject = rel.toModelObject
if isinstance(toModelObject, ModelConcept):
if doVertices:
aspectQnamesUsed.add(toModelObject.qname)
targetUri = True
else:
targetUri = self.aspectQnameProxyId(toModelObject.qname)
targetId = qnamePrefix_Name(toModelObject.qname)
elif isinstance(toModelObject, ModelResource):
if doVertices:
resources.add(toModelObject)
targetUri = 0 # just can't be None, but doesn't matter on doVertices pass
else:
if rel.preferredLabel:
_relProp['preferredLabel'] = rel.preferredLabel
if rel.arcrole in (XbrlConst.all, XbrlConst.notAll):
_relProp['cubeClosed'] = rel.closed
elif rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember):
_relProp['aspectValueUsable'] = rel.usable
elif rel.arcrole == XbrlConst.summationItem:
_relProp['weight'] = rel.weightDecimal
if relationshipSet.arcrole == "XBRL-dimensions":
_relProp['arcrole'] = rel.arcrole
if toModelObject.role:
_relProp['resourceRole'] = toModelObject.role
targetUri = modelObjectUri(toModelObject)
targetId = toModelObject.modelDocument.basename + '#' + XmlUtil.elementFragmentIdentifier(toModelObject)
else:
targetUri = None # tbd
if sourceUri is not None and targetUri is not None:
targetRelSetId = relSetId
targetRelSetKey = relationshipSetKey
if relationshipSet.arcrole == "XBRL-dimensions" and rel.targetRole:
targetRelSet = self.modelXbrl.relationshipSet(relationshipSet.arcrole, rel.targetRole)
for i, relSetKey in enumerate(self.relationshipSets):
arcrole, ELR, linkqname, arcqname = relSetKey
if arcrole == "XBRL-dimensions" and ELR == rel.targetRole:
targetRelationshipSetId = relSetIds[relSetKey]
targetRelSetKey = relSetKey
break
if not doVertices:
_relProp['targetLinkrole'] = rel.targetRole
_relProp['targetRelSet'] = targetRelationshipSetId
else:
targetRelSetKey = relationshipSetKey
targetRelSet = relationshipSet
if doVertices:
relId = None
else:
_relProp['from'] = sourceUri
_relProp['fromQname'] = sourceQname
_relProp['to'] = targetUri
_arcrole = os.path.basename(rel.arcrole)
relId = "{}/{}/{}/{}".format(
_arcrole,
os.path.basename(rel.linkrole),
sourceId,
targetId)
_relProp['relId'] = relId
_relProp['relSetKey'] = relationshipSetKey
relE.append(_relProp)
seq += 1
seq = walkTree(targetRelSet.fromModelObject(toModelObject), relId, seq, depth+1, targetRelSetKey, targetRelSet, visited, targetRelSetId, doVertices)
visited.remove(rel)
return seq
for doVertices in range(1,-1,-1): # pass 0 = vertices, pass 1 = edges
for i, relationshipSetKey in enumerate(self.relationshipSets):
arcrole, linkrole, linkqname, arcqname = relationshipSetKey
if arcrole not in ("XBRL-formulae", "Table-rendering", "XBRL-footnotes") and linkrole and linkqname and arcqname:
relSetId = relSetIds[relationshipSetKey]
relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
seq = 1
for rootConcept in relationshipSet.rootConcepts:
seq = walkTree(relationshipSet.fromModelObject(rootConcept), None, seq, 1, relationshipSetKey, relationshipSet, set(), relSetId, doVertices)
if doVertices:
if resources:
for resource in resources:
resourceUri = modelObjectUri(resource)
r = {'url': resourceUri,
'value': resource.stringValue
}
if resource.xmlLang:
r['language'] = resource.xmlLang
if resource.role:
r['role'] = resource.role
self.documents[modelObjectDocumentUri(resource)]['resources'][
XmlUtil.elementFragmentIdentifier(resource)] = r
self.insertAspectProxies(aspectQnamesUsed)
else:
for j, rel in enumerate(relE):
relId = rel['relId']
relSetId = rel['relSetId']
relSet = relationshipSets[relSetId]
r = dict((k,v)
for k,v in rel.items()
if k not in ('relId', 'relPredicate', 'relSetId', 'relSetKey', 'fromQname'))
relSet['relationships'].append(r)
if rel.get('depth', 0) == 1:
relSet['roots'].append(r)
sourceQname = rel['fromQname']
if sourceQname in self.aspect_proxy:
self.aspect_proxy[sourceQname] \
.setdefault('relationships', {}) \
.setdefault(rel['relSetId'], []) \
.append(rel['to'])
# TBD: do we want to link resources to the dts (by role, class, or otherwise?)
resourceIDs.clear() # dereferemce objects
resources = None
def insertValidationResults(self):
logEntries = []
for handler in logging.getLogger("arelle").handlers:
if hasattr(handler, "dbHandlerLogEntries"):
logEntries = handler.dbHandlerLogEntries()
break
messages = []
messageRefs = [] # direct link to objects
for i, logEntry in enumerate(logEntries):
messageId = "message/{}".format(i+1)
self.report['messages'][messageId] = m = {
'code': logEntry['code'],
'level': logEntry['level'],
'value': logEntry['message']['text'],
'report': self.reportURI,
'messageId': messageId
}
# capture message ref's
for ref in logEntry['refs']:
modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
# for now just find a concept
aspectObj = None
if isinstance(modelObject, ModelFact):
factId = XmlUtil.elementFragmentIdentifier(modelObject)
dataPoint = self.report['dataPoints'][factId]
dataPoint.setdefault('messages', []).append(messageId)
elif isinstance(modelObject, ModelConcept):
# be sure there's a proxy
                    self.insertAspectProxies( (modelObject.qname,)) # need immediate use of proxy
                    self.aspectQnameProxy(modelObject.qname).setdefault('messages', []).append(messageId)
elif isinstance(modelObject, ModelRelationship):
''' TBD
sourceId = qnamePrefix_Name(modelObject.fromModelObject.qname)
toModelObject = modelObject.toModelObject
if isinstance(toModelObject, ModelConcept):
targetId = qnamePrefix_Name(toModelObject.qname)
elif isinstance(toModelObject, ModelResource):
targetId = toModelObject.modelDocument.basename + '#' + XmlUtil.elementFragmentIdentifier(toModelObject)
else:
continue
objUri = URIRef("{}/Relationship/{}/{}/{}/{}".format(
self.reportURI,
os.path.basename(modelObject.arcrole),
os.path.basename(modelObject.linkrole),
sourceId,
targetId) )
'''
else:
continue
if messages:
self.showStatus("insert validation messages")
| apache-2.0 |
akretion/pos-addons | tg_partner_firstname/tg_partner_firstname.py | 11 | 2548 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 - Thierry Godin. All Rights Reserved
# @author Thierry Godin <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp import netsvc, tools, pooler
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class inherit_res_partner(osv.osv):
_name='res.partner'
_inherit='res.partner'
def write(self, cr, uid, ids, vals, context=None):
v_name = None
v_firstname = None
if vals.get('name'):
# name to Uppercase
v_name = vals['name'].strip()
vals['name'] = v_name.upper()
if vals.get('firstname'):
# firstname capitalized
v_firstname = vals['firstname'].strip()
vals['firstname'] = v_firstname.title()
result = super(inherit_res_partner,self).write(cr, uid, ids, vals, context=context)
return result
def create(self, cr, uid, vals, context=None):
v_name = None
v_firstname = None
if vals.get('name'):
# name to Uppercase
v_name = vals['name'].strip()
vals['name'] = v_name.upper()
if vals.get('firstname'):
# firstname capitalized
v_firstname = vals['firstname'].strip()
vals['firstname'] = v_firstname.title()
result = super(inherit_res_partner,self).create(cr, uid, vals, context=context)
return result
_columns = {
'firstname' : fields.char('Firstname', size=128),
}
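# Illustrative note (hypothetical values, not part of the original module): with
# the write()/create() overrides above, a partner entered as name='dupont',
# firstname='jean' is stored as name='DUPONT', firstname='Jean'.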
| lgpl-3.0 |
purism/pdak | dak/dakdb/update76.py | 8 | 1666 | #!/usr/bin/env python
# coding=utf8
"""
Add list of closed bugs to changes table
@contact: Debian FTP Master <[email protected]>
@copyright: 2012 Ansgar Burchardt <[email protected]>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
import psycopg2
from daklib.dak_exceptions import DBUpdateError
from daklib.config import Config
################################################################################
def do_update(self):
print __doc__
try:
cnf = Config()
c = self.db.cursor()
c.execute("ALTER TABLE changes ADD COLUMN closes TEXT[]")
c.execute("UPDATE config SET value = '76' WHERE name = 'db_revision'")
self.db.commit()
except psycopg2.ProgrammingError as msg:
self.db.rollback()
raise DBUpdateError('Unable to apply sick update 76, rollback issued. Error message: {0}'.format(msg))
| gpl-2.0 |
Oleg-k/kernel_source_s8600 | scripts/build-all.py | 1250 | 9474 | #! /usr/bin/env python
# Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
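# Builder runs a single make invocation, teeing its combined stdout/stderr into
# a per-target log file; unless --verbose is set, progress is condensed to dots.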
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
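        # Reading the raw descriptor instead of iterating proc.stdout forwards
        # partial output immediately rather than waiting for complete lines.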
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
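# Targets whose build failed; only populated when --keep-going is in effect.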
failed_targets = []
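# Configure one target via "make <target>_defconfig" in its own directory under
# build_dir, then (unless --updateconfigs is given) run the requested make
# targets, logging output to log-<target>.log.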
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
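# Build each requested target in turn, optionally appending a config override to
# its defconfig first, and report any failures collected with --keep-going.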
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
antsant/namebench | nb_third_party/graphy/backends/google_chart_api/__init__.py | 205 | 2023 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backend which can generate charts using the Google Chart API."""
from graphy import line_chart
from graphy import bar_chart
from graphy import pie_chart
from graphy.backends.google_chart_api import encoders
def _GetChartFactory(chart_class, display_class):
"""Create a factory method for instantiating charts with displays.
Returns a method which, when called, will create & return a chart with
chart.display already populated.
"""
def Inner(*args, **kwargs):
chart = chart_class(*args, **kwargs)
chart.display = display_class(chart)
return chart
return Inner
# These helper methods make it easy to get chart objects with display
# objects already setup. For example, this:
# chart = google_chart_api.LineChart()
# is equivalent to:
# chart = line_chart.LineChart()
# chart.display = google_chart_api.encoders.LineChartEncoder(chart)
#
# (If there's some chart type for which a helper method isn't available, you
# can always just instantiate the correct encoder manually, like in the 2nd
# example above).
# TODO: fix these so they have nice docs in ipython (give them __doc__)
LineChart = _GetChartFactory(line_chart.LineChart, encoders.LineChartEncoder)
Sparkline = _GetChartFactory(line_chart.Sparkline, encoders.SparklineEncoder)
BarChart = _GetChartFactory(bar_chart.BarChart, encoders.BarChartEncoder)
PieChart = _GetChartFactory(pie_chart.PieChart, encoders.PieChartEncoder)
| apache-2.0 |
papoteur-mga/mageiaSync | setup.py | 1 | 1249 | from distutils.core import setup
import os
LOCALE_DIR= '/usr/share/locale'
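# Gather every language directory under po/locale so the compiled mageiasync.mo
# files can be installed into the matching LC_MESSAGES directory of LOCALE_DIR.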
locales = []
if os.path.exists('po/locale'):
for lang in os.listdir('po/locale'):
locales.append(os.path.join(lang, 'LC_MESSAGES'))
data_files = [("share/applications/", ["share/applications/mageiasync.desktop"]),
("share/icons/hicolor/scalable/apps", ["share/icons/mageiasync.svg"])
] + [(os.path.join(LOCALE_DIR, locale),
[os.path.join('po', 'locale', locale, 'mageiasync.mo')])
for locale in locales]
setup(
name = 'mageiasync',
version = '0.1.2',
packages = ['mageiaSync'],
scripts = ['mageiasync'],
license = 'GNU General Public License v3 (GPLv3)',
url = 'https://github.com/papoteur-mga/mageiaSync',
    description = 'This tool downloads ISO images from a mirror or from the Mageia testing server.',
long_description = 'This tool uses rsync with a GUI',
platforms = ['Linux'],
author = 'Papoteur',
author_email = '[email protected]',
maintainer = 'david_david',
maintainer_email = '[email protected]',
data_files = data_files,
)
| gpl-3.0 |
abstract-open-solutions/OCB | addons/edi/__init__.py | 437 | 1157 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import controllers
from . import models
from . import edi_service
from .models.edi import EDIMixin, edi
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ehashman/oh-mainline | vendor/packages/Django/tests/regressiontests/context_processors/tests.py | 96 | 1344 | """
Tests for Django's bundled context processors.
"""
from django.test import TestCase
class RequestContextProcessorTests(TestCase):
"""
Tests for the ``django.core.context_processors.request`` processor.
"""
urls = 'regressiontests.context_processors.urls'
def test_request_attributes(self):
"""
Test that the request object is available in the template and that its
attributes can't be overridden by GET and POST parameters (#3828).
"""
url = '/request_attrs/'
# We should have the request object in the template.
response = self.client.get(url)
self.assertContains(response, 'Have request')
# Test is_secure.
response = self.client.get(url)
self.assertContains(response, 'Not secure')
response = self.client.get(url, {'is_secure': 'blah'})
self.assertContains(response, 'Not secure')
response = self.client.post(url, {'is_secure': 'blah'})
self.assertContains(response, 'Not secure')
# Test path.
response = self.client.get(url)
self.assertContains(response, url)
response = self.client.get(url, {'path': '/blah/'})
self.assertContains(response, url)
response = self.client.post(url, {'path': '/blah/'})
self.assertContains(response, url)
| agpl-3.0 |
tectronics/cortex-vfx | test/IECore/SWAReaderTest.py | 12 | 5143 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
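# Tests for IECore.SWAReader: construction, reading points and primitive
# variables from the sample .swa file, canRead checks and Reader registration.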
class SWAReaderTest( unittest.TestCase ) :
def testConstruction( self ) :
r = IECore.SWAReader()
self.assertEqual( r["fileName"].getTypedValue(), "" )
r = IECore.SWAReader( "test/IECore/data/swaFiles/test.swa" )
self.assertEqual( r["fileName"].getTypedValue(), "test/IECore/data/swaFiles/test.swa" )
def testReading( self ) :
r = IECore.SWAReader( "test/IECore/data/swaFiles/test.swa" )
o = r.read()
IECore.ObjectWriter( o, "/tmp/trees4.cob" ).write()
self.failUnless( o.isInstanceOf( IECore.PointsPrimitive.staticTypeId() ) )
self.assertEqual( o.numPoints, 5 + 6 )
self.failUnless( o.arePrimitiveVariablesValid() )
self.failUnless( "P" in o )
self.failUnless( "xAxis" in o )
self.failUnless( "yAxis" in o )
self.failUnless( "zAxis" in o )
self.failUnless( "scale" in o )
self.failUnless( "treeName" in o )
self.failUnless( "treeNameIndices" in o )
self.assertEqual( o["P"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["xAxis"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["yAxis"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["zAxis"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["scale"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["treeNameIndices"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( o["treeName"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.failUnless( isinstance( o["P"].data, IECore.V3fVectorData ) )
self.failUnless( isinstance( o["xAxis"].data, IECore.V3fVectorData ) )
self.failUnless( isinstance( o["yAxis"].data, IECore.V3fVectorData ) )
self.failUnless( isinstance( o["zAxis"].data, IECore.V3fVectorData ) )
self.failUnless( isinstance( o["scale"].data, IECore.FloatVectorData ) )
self.failUnless( isinstance( o["treeNameIndices"].data, IECore.IntVectorData ) )
self.failUnless( isinstance( o["treeName"].data, IECore.StringVectorData ) )
self.assertEqual( o["treeName"].data, IECore.StringVectorData( [ "Acacia_RT", "BroadLeaf_HighDetail" ] ) )
self.assertEqual( o["P"].data[0], IECore.V3f( 3750.05, 1556.86, -2149.22 ) )
self.assertEqual( o["yAxis"].data[0], IECore.V3f( 0.0176831, 0.998519, 0.0514542 ) )
self.assertEqual( o["xAxis"].data[0], IECore.V3f( 0.0179192, -0.0517705, 0.998498 ) )
self.assertEqual( o["zAxis"].data[0], o["xAxis"].data[0].cross( o["yAxis"].data[0] ) )
self.assertAlmostEqual( o["scale"].data[0], 6.4516, 6 )
self.assertAlmostEqual( o["scale"].data[1], 6.7, 6 )
self.assertEqual( o["treeNameIndices"].data, IECore.IntVectorData( [ 0 ] * 5 + [ 1 ] * 6 ) )
def testCanRead( self ) :
self.failUnless( IECore.SWAReader.canRead( "test/IECore/data/swaFiles/test.swa" ) )
self.failIf( IECore.IDXReader.canRead( "test/IECore/data/exrFiles/carPark.exr" ) )
self.failIf( IECore.SWAReader.canRead( "test/IECore/data/idxFiles/test.idx" ) )
self.failIf( IECore.SWAReader.canRead( "test/IECore/data/empty" ) )
def testRegistration( self ) :
r = IECore.Reader.create( "test/IECore/data/swaFiles/test.swa" )
self.failUnless( isinstance( r, IECore.SWAReader ) )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
Itxaka/libcloud | docs/examples/compute/vmware_vcloud_1.5.py | 60 | 2076 | from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security
# Skip this step if you are launching nodes on an official vCloud
# provider. It is intended only for self signed SSL certs in
# vanilla vCloud Director v1.5 test deployments.
# Note: Code like this poses a security risk (MITM attack) and
# that's the reason why you should never use it for anything else
# besides testing. You have been warned.
libcloud.security.VERIFY_SSL_CERT = False
vcloud = get_driver(Provider.VCLOUD)
driver = vcloud('your username@organisation', 'your password',
host='vcloud.local', api_version='1.5')
# List all instantiated vApps
nodes = driver.list_nodes()
# List all VMs within the first vApp instance
print nodes[0].extra['vms']
# List all available vApp Templates
images = driver.list_images()
image = [i for i in images if i.name == 'natty-server-cloudimg-amd64'][0]
# Create node with minimum set of parameters
node = driver.create_node(name='test node 1', image=image)
# Destroy the node
driver.destroy_node(node)
# Create node without deploying and powering it on
node = driver.create_node(name='test node 2', image=image, ex_deploy=False)
# Create node with custom CPU & Memory values
node = driver.create_node(name='test node 3', image=image, ex_vm_cpu=3,
ex_vm_memory=1024)
# Create node with customised networking parameters (eg. for OVF
# imported images)
node = driver.create_node(name='test node 4', image=image,
ex_vm_network='your vm net name',
ex_network='your org net name',
ex_vm_fence='bridged', ex_vm_ipmode='DHCP')
# Create node in a custom virtual data center
node = driver.create_node(name='test node 5', image=image,
ex_vdc='your vdc name')
# Create node with guest OS customisation script to be run at first boot
node = driver.create_node(name='test node 6', image=image,
ex_vm_script='filesystem path to your script')
| apache-2.0 |