Dataset columns: `repo_name` (string, 5–100 chars), `path` (string, 4–375 chars), `copies` (class, 991 values), `size` (string, 4–7 chars), `content` (string, 666 chars–1M), `license` (class, 15 values).

repo_name | path | copies | size | content | license
---|---|---|---|---|---
knowsis/django | tests/mail/tests.py | 48 | 36821 |

# coding: utf-8
from __future__ import unicode_literals
import asyncore
from email.mime.text import MIMEText
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from django.core import mail
from django.core.mail import (EmailMessage, mail_admins, mail_managers,
EmailMultiAlternatives, send_mail, send_mass_mail)
from django.core.mail.backends import console, dummy, locmem, filebased, smtp
from django.core.mail.message import BadHeaderError
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text, force_bytes
from django.utils.six import PY3, StringIO, binary_type
from django.utils.translation import ugettext_lazy
if PY3:
from email.utils import parseaddr
from email import message_from_bytes, message_from_binary_file
else:
from email.Utils import parseaddr
from email import (message_from_string as message_from_bytes,
message_from_file as message_from_binary_file)
class HeadersCheckMixin(object):
def assertMessageHasHeaders(self, message, headers):
"""
Check that :param message: has all :param headers: headers.
:param message: can be an instance of an email.Message subclass or a
string with the contents of an email message.
:param headers: should be a set of (header-name, header-value) tuples.
"""
if isinstance(message, binary_type):
message = message_from_bytes(message)
msg_headers = set(message.items())
self.assertTrue(headers.issubset(msg_headers), msg='Message is missing '
'the following headers: %s' % (headers - msg_headers),)
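# Illustrative usage (not part of the original test file): a test case that
# mixes in HeadersCheckMixin can assert on either a parsed message object or
# its raw bytes, e.g.
#   self.assertMessageHasHeaders(msg.message().as_bytes(),
#                                {('Subject', 'Subject'), ('To', '[email protected]')})
# where msg is an EmailMessage and the address is a placeholder.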
class MailTests(HeadersCheckMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
def test_ascii(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], '[email protected]')
self.assertEqual(message['To'], '[email protected]')
def test_multiple_recipients(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], '[email protected]')
self.assertEqual(message['To'], '[email protected], [email protected]')
def test_cc(self):
"""Regression test for #7722"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'])
message = email.message()
self.assertEqual(message['Cc'], '[email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]'])
# Test multiple CC with multiple To
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'], cc=['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]'])
# Testing with Bcc
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'], cc=['[email protected]', '[email protected]'], bcc=['[email protected]'])
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'])
def test_recipients_as_tuple(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ('[email protected]', '[email protected]'), cc=('[email protected]', '[email protected]'), bcc=('[email protected]',))
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'])
def test_header_injection(self):
email = EmailMessage('Subject\nInjection Test', 'Content', '[email protected]', ['[email protected]'])
self.assertRaises(BadHeaderError, email.message)
email = EmailMessage(ugettext_lazy('Subject\nInjection Test'), 'Content', '[email protected]', ['[email protected]'])
self.assertRaises(BadHeaderError, email.message)
def test_space_continuation(self):
"""
Test for space continuation character in long (ascii) subject headers (#7747)
"""
email = EmailMessage('Long subject lines that get wrapped should contain a space continuation character to get expected behavior in Outlook and Thunderbird', 'Content', '[email protected]', ['[email protected]'])
message = email.message()
# Note that in Python 3, maximum line length has increased from 76 to 78
self.assertEqual(message['Subject'].encode(), b'Long subject lines that get wrapped should contain a space continuation\n character to get expected behavior in Outlook and Thunderbird')
def test_message_header_overrides(self):
"""
Specifying dates or message-ids in the extra headers overrides the
default values (#9233)
"""
headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
email = EmailMessage('subject', 'content', '[email protected]', ['[email protected]'], headers=headers)
self.assertEqual(sorted(email.message().items()), [
('Content-Transfer-Encoding', '7bit'),
('Content-Type', 'text/plain; charset="utf-8"'),
('From', '[email protected]'),
('MIME-Version', '1.0'),
('Message-ID', 'foo'),
('Subject', 'subject'),
('To', '[email protected]'),
('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
])
def test_from_header(self):
"""
Make sure we can manually set the From header (#9214)
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
message = email.message()
self.assertEqual(message['From'], '[email protected]')
def test_to_header(self):
"""
Make sure we can manually set the To header (#17444)
"""
email = EmailMessage('Subject', 'Content', '[email protected]',
['[email protected]', '[email protected]'],
headers={'To': '[email protected]'})
message = email.message()
self.assertEqual(message['To'], '[email protected]')
self.assertEqual(email.to, ['[email protected]', '[email protected]'])
# If we don't set the To header manually, it should default to the `to` argument to the constructor
email = EmailMessage('Subject', 'Content', '[email protected]',
['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['To'], '[email protected], [email protected]')
self.assertEqual(email.to, ['[email protected]', '[email protected]'])
def test_multiple_message_call(self):
"""
Regression for #13259 - Make sure that headers are not changed when
calling EmailMessage.message()
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
message = email.message()
self.assertEqual(message['From'], '[email protected]')
message = email.message()
self.assertEqual(message['From'], '[email protected]')
def test_unicode_address_header(self):
"""
Regression for #11144 - When a to/from/cc header contains unicode,
make sure the email addresses are parsed correctly (especially with
regards to commas)
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['"Firstname Sürname" <[email protected]>', '[email protected]'])
self.assertEqual(email.message()['To'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>, [email protected]')
email = EmailMessage('Subject', 'Content', '[email protected]', ['"Sürname, Firstname" <[email protected]>', '[email protected]'])
self.assertEqual(email.message()['To'], '=?utf-8?q?S=C3=BCrname=2C_Firstname?= <[email protected]>, [email protected]')
def test_unicode_headers(self):
email = EmailMessage("Gżegżółka", "Content", "[email protected]", ["[email protected]"],
headers={"Sender": '"Firstname Sürname" <[email protected]>',
"Comments": 'My Sürname is non-ASCII'})
message = email.message()
self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>')
self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
def test_safe_mime_multipart(self):
"""
Make sure headers can be set with a different encoding than utf-8 in
SafeMIMEMultipart as well
"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '"Sürname, Firstname" <[email protected]>'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.encoding = 'iso-8859-1'
self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <[email protected]>')
self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
def test_encoding(self):
"""
Regression for #12791 - Encode body correctly with other encodings
than utf-8
"""
email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', '[email protected]', ['[email protected]'])
email.encoding = 'iso-8859-1'
message = email.message()
self.assertTrue(message.as_string().startswith('Content-Type: text/plain; charset="iso-8859-1"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nSubject: Subject\nFrom: [email protected]\nTo: [email protected]'))
self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
# Make sure MIME attachments also work correctly with encodings other than utf-8
text_content = 'Firstname Sürname is a great guy.'
html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
msg = EmailMultiAlternatives('Subject', text_content, '[email protected]', ['[email protected]'])
msg.encoding = 'iso-8859-1'
msg.attach_alternative(html_content, "text/html")
payload0 = msg.message().get_payload(0)
self.assertMessageHasHeaders(payload0, set((
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable'))))
self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.'))
payload1 = msg.message().get_payload(1)
self.assertMessageHasHeaders(payload1, set((
('MIME-Version', '1.0'),
('Content-Type', 'text/html; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable'))))
self.assertTrue(payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>'))
def test_attachments(self):
"""Regression test for #9367"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_content_type(), 'multipart/mixed')
self.assertEqual(message.get_default_type(), 'text/plain')
payload = message.get_payload()
self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
self.assertEqual(payload[1].get_content_type(), 'application/pdf')
def test_non_ascii_attachment_filename(self):
"""Regression test for #14964"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
content = 'This is the message.'
msg = EmailMessage(subject, content, from_email, [to], headers=headers)
# Unicode in file name
msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
payload = message.get_payload()
self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
def test_dummy_backend(self):
"""
Make sure that the dummy backend returns the correct number of sent messages
"""
connection = dummy.EmailBackend()
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
Make sure that get_connection() accepts arbitrary keyword arguments that might be
used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo='bar')
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection('mail.custombackend.EmailBackend')
self.assertTrue(hasattr(conn, 'test_outbox'))
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
def test_backend_arg(self):
"""Test backend argument of mail.get_connection()"""
self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.locmem.EmailBackend'), locmem.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.console.EmailBackend'), console.EmailBackend)
tmp_dir = tempfile.mkdtemp()
try:
self.assertIsInstance(mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir), filebased.EmailBackend)
finally:
shutil.rmtree(tmp_dir)
self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
ADMINS=[('nobody', '[email protected]')],
MANAGERS=[('nobody', '[email protected]')])
def test_connection_arg(self):
"""Test connection argument to send_mail(), et. al."""
mail.outbox = []
# Send using non-default connection
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, 'Subject')
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mass_mail([
('Subject1', 'Content1', '[email protected]', ['[email protected]']),
('Subject2', 'Content2', '[email protected]', ['[email protected]']),
], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 2)
self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_admins('Admin message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_managers('Manager message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage('Subject', 'From the future', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
self.assertFalse(b'>From the future' in email.message().as_bytes())
def test_dont_base64_encode(self):
# Ticket #3472
# Shouldn't use Base64 encoding at all
msg = EmailMessage('Subject', 'UTF-8 encoded body', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
self.assertFalse(b'Content-Transfer-Encoding: base64' in msg.message().as_bytes())
# Ticket #11212
# Shouldn't use quoted printable, should detect it can represent content with 7 bit data
msg = EmailMessage('Subject', 'Body with only ASCII characters.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
s = msg.message().as_bytes()
self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue(b'Content-Transfer-Encoding: 7bit' in s)
# Shouldn't use quoted printable, should detect it can represent content with 8 bit data
msg = EmailMessage('Subject', 'Body with latin characters: àáä.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
s = msg.message().as_bytes()
self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue(b'Content-Transfer-Encoding: 8bit' in s)
msg = EmailMessage('Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
s = msg.message().as_bytes()
self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue(b'Content-Transfer-Encoding: 8bit' in s)
class BaseEmailBackendTests(object):
email_backend = None
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
self.longMessage = True
self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
def get_mailbox_content(self):
raise NotImplementedError
def flush_mailbox(self):
raise NotImplementedError
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(len(mailbox), 1,
"Expected exactly one message, got %d.\n%r" % (len(mailbox), [
m.as_string() for m in mailbox]))
return mailbox[0]
def test_send(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "[email protected]")
self.assertEqual(message.get_all("to"), ["[email protected]"])
def test_send_unicode(self):
email = EmailMessage('Chère maman', 'Je t\'aime très fort', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
self.assertEqual(force_text(message.get_payload(decode=True)), 'Je t\'aime très fort')
def test_send_many(self):
email1 = EmailMessage('Subject', 'Content1', '[email protected]', ['[email protected]'])
email2 = EmailMessage('Subject', 'Content2', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email1, email2])
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), "Content1")
self.assertEqual(messages[1].get_payload(), "Content2")
def test_send_verbose_name(self):
email = EmailMessage("Subject", "Content", '"Firstname Sürname" <[email protected]>',
["[email protected]"])
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>")
@override_settings(MANAGERS=[('nobody', '[email protected]')])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(ADMINS=[('nobody', '[email protected]')])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins """
mail_admins('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(
ADMINS=[('nobody', '[email protected]')],
MANAGERS=[('nobody', '[email protected]')])
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazy translated subject = bad output
Regression for #13494
"""
mail_managers(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.flush_mailbox()
mail_admins(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
@override_settings(ADMINS=(), MANAGERS=())
def test_empty_admins(self):
"""
Test that mail_admins/mail_managers doesn't connect to the mail server
if there are no recipients (#9383)
"""
mail_admins('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
mail_managers('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'])
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertStartsWith(message.as_string(), 'MIME-Version: 1.0\nContent-Type: text/plain; charset="utf-8"\nContent-Transfer-Encoding: 7bit\nSubject: Subject\nFrom: [email protected]\nTo: [email protected]\nCc: [email protected]\nDate: ')
def test_idn_send(self):
"""
Regression test for #14301
"""
self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
self.flush_mailbox()
m = EmailMessage('Subject', 'Content', 'from@öäü.com',
['to@öäü.com'], cc=['cc@öäü.com'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
self.assertEqual(message.get('cc'), '[email protected]')
def test_recipient_without_domain(self):
"""
Regression test for #15042
"""
self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), "tester")
self.assertEqual(message.get('to'), "django")
def test_close_connection(self):
"""
Test that the connection can be closed (even when not explicitly opened)
"""
conn = mail.get_connection(username='', password='')
try:
conn.close()
except Exception as e:
self.fail("close() unexpectedly raised an exception: %s" % e)
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.locmem.EmailBackend'
def get_mailbox_content(self):
return [m.message() for m in mail.outbox]
def flush_mailbox(self):
mail.outbox = []
def tearDown(self):
super(LocmemBackendTests, self).tearDown()
mail.outbox = []
def test_locmem_shared_messages(self):
"""
Make sure that the locmem backend populates the outbox.
"""
connection = locmem.EmailBackend()
connection2 = locmem.EmailBackend()
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
connection.send_messages([email])
connection2.send_messages([email])
self.assertEqual(len(mail.outbox), 2)
def test_validate_multiline_headers(self):
# Ticket #18861 - Validate emails when using the locmem backend
with self.assertRaises(BadHeaderError):
send_mail('Subject\nMultiline', 'Content', '[email protected]', ['[email protected]'])
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.filebased.EmailBackend'
def setUp(self):
super(FileBackendTests, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp_dir)
self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
self._settings_override.enable()
def tearDown(self):
self._settings_override.disable()
super(FileBackendTests, self).tearDown()
def flush_mailbox(self):
for filename in os.listdir(self.tmp_dir):
os.unlink(os.path.join(self.tmp_dir, filename))
def get_mailbox_content(self):
messages = []
for filename in os.listdir(self.tmp_dir):
with open(os.path.join(self.tmp_dir, filename), 'rb') as fp:
session = fp.read().split(force_bytes('\n' + ('-' * 79) + '\n', encoding='ascii'))
messages.extend(message_from_bytes(m) for m in session if m)
return messages
def test_file_sessions(self):
"""Make sure opening a connection creates a new file"""
msg = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
connection = mail.get_connection()
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp:
message = message_from_binary_file(fp)
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
connection2 = mail.get_connection()
connection2.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
msg.connection = mail.get_connection()
self.assertTrue(connection.open())
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
connection.close()
class ConsoleBackendTests(HeadersCheckMixin, BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.console.EmailBackend'
def setUp(self):
super(ConsoleBackendTests, self).setUp()
self.__stdout = sys.stdout
self.stream = sys.stdout = StringIO()
def tearDown(self):
del self.stream
sys.stdout = self.__stdout
del self.__stdout
super(ConsoleBackendTests, self).tearDown()
def flush_mailbox(self):
self.stream = sys.stdout = StringIO()
def get_mailbox_content(self):
messages = self.stream.getvalue().split(str('\n' + ('-' * 79) + '\n'))
return [message_from_bytes(force_bytes(m)) for m in messages if m]
def test_console_stream_kwarg(self):
"""
Test that the console backend can be pointed at an arbitrary stream.
"""
s = StringIO()
connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection)
message = force_bytes(s.getvalue().split('\n' + ('-' * 79) + '\n')[0])
self.assertMessageHasHeaders(message, set((
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]'))))
self.assertIn(b'\nDate: ', message)
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
"""
Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
"""
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
smtpd.SMTPServer.__init__(self, *args, **kwargs)
self._sink = []
self.active = False
self.active_lock = threading.Lock()
self.sink_lock = threading.Lock()
def process_message(self, peer, mailfrom, rcpttos, data):
if PY3:
data = data.encode('utf-8')
m = message_from_bytes(data)
maddr = parseaddr(m.get('from'))[1]
if mailfrom != maddr:
return "553 '%s' != '%s'" % (mailfrom, maddr)
with self.sink_lock:
self._sink.append(m)
def get_sink(self):
with self.sink_lock:
return self._sink[:]
def flush_sink(self):
with self.sink_lock:
self._sink[:] = []
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
asyncore.close_all()
def stop(self):
if self.active:
self.active = False
self.join()
class SMTPBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.smtp.EmailBackend'
@classmethod
def setUpClass(cls):
cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
cls._settings_override = override_settings(
EMAIL_HOST="127.0.0.1",
EMAIL_PORT=cls.server.socket.getsockname()[1])
cls._settings_override.enable()
cls.server.start()
@classmethod
def tearDownClass(cls):
cls._settings_override.disable()
cls.server.stop()
def setUp(self):
super(SMTPBackendTests, self).setUp()
self.server.flush_sink()
def tearDown(self):
self.server.flush_sink()
super(SMTPBackendTests, self).tearDown()
def flush_mailbox(self):
self.server.flush_sink()
def get_mailbox_content(self):
return self.server.get_sink()
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.username, 'not empty username')
self.assertEqual(backend.password, 'not empty password')
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_override_settings(self):
backend = smtp.EmailBackend(username='username', password='password')
self.assertEqual(backend.username, 'username')
self.assertEqual(backend.password, 'password')
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_disabled_authentication(self):
backend = smtp.EmailBackend(username='', password='')
self.assertEqual(backend.username, '')
self.assertEqual(backend.password, '')
def test_server_stopped(self):
"""
Test that closing the backend while the SMTP server is stopped doesn't
raise an exception.
"""
backend = smtp.EmailBackend(username='', password='')
backend.open()
self.server.stop()
try:
backend.close()
except Exception as e:
self.fail("close() unexpectedly raised an exception: %s" % e)
| bsd-3-clause |
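The test suite above leans on Django's locmem backend, which collects outgoing mail in `mail.outbox` instead of talking to a real SMTP server. A minimal standalone sketch of that pattern (not taken from the suite; the addresses are placeholders):

```python
from django.core import mail
from django.test import SimpleTestCase
from django.test.utils import override_settings


@override_settings(EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
class OutboxExampleTests(SimpleTestCase):
    def test_outbox_captures_message(self):
        # send_mail() returns the number of successfully delivered messages.
        sent = mail.send_mail('Subject', 'Content', '[email protected]', ['[email protected]'])
        self.assertEqual(sent, 1)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Subject')
```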
IONISx/edx-platform | openedx/core/lib/extract_tar.py | 135 | 2427 |

"""
Safe version of tarfile.extractall which does not extract any files that would
be, or symlink to a file that is, outside of the directory extracted in.
Adapted from:
http://stackoverflow.com/questions/10060069/safely-extract-zip-or-tar-using-python
"""
from os.path import abspath, realpath, dirname, join as joinpath
from django.core.exceptions import SuspiciousOperation
from django.conf import settings
import logging
log = logging.getLogger(__name__)
def resolved(rpath):
"""
Returns the canonical absolute path of `rpath`.
"""
return realpath(abspath(rpath))
def _is_bad_path(path, base):
"""
Is (the canonical absolute path of) `path` outside `base`?
"""
return not resolved(joinpath(base, path)).startswith(base)
def _is_bad_link(info, base):
"""
Does the file sym- or hard-link to files outside `base`?
"""
# Links are interpreted relative to the directory containing the link
tip = resolved(joinpath(base, dirname(info.name)))
return _is_bad_path(info.linkname, base=tip)
def safemembers(members, base):
"""
Check that all elements of a tar file are safe.
"""
base = resolved(base)
# check that we're not trying to import outside of the data_dir
if not base.startswith(resolved(settings.DATA_DIR)):
raise SuspiciousOperation("Attempted to import course outside of data dir")
for finfo in members:
if _is_bad_path(finfo.name, base):
log.debug("File %r is blocked (illegal path)", finfo.name)
raise SuspiciousOperation("Illegal path")
elif finfo.issym() and _is_bad_link(finfo, base):
log.debug("File %r is blocked: Hard link to %r", finfo.name, finfo.linkname)
raise SuspiciousOperation("Hard link")
elif finfo.islnk() and _is_bad_link(finfo, base):
log.debug("File %r is blocked: Symlink to %r", finfo.name,
finfo.linkname)
raise SuspiciousOperation("Symlink")
elif finfo.isdev():
log.debug("File %r is blocked: FIFO, device or character file",
finfo.name)
raise SuspiciousOperation("Dev file")
return members
def safetar_extractall(tar_file, path=".", members=None): # pylint: disable=unused-argument
"""
Safe version of `tar_file.extractall()`.
"""
return tar_file.extractall(path, safemembers(tar_file, path))
| agpl-3.0 |
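A hedged usage sketch for the `safetar_extractall()` helper above; the archive and target paths are hypothetical, and the target must resolve under `settings.DATA_DIR` or `safemembers()` raises `SuspiciousOperation`:

```python
import tarfile

from openedx.core.lib.extract_tar import safetar_extractall

course_archive = '/edx/var/data/course.tar.gz'    # hypothetical paths
target_dir = '/edx/var/data/unpacked-course/'

with tarfile.open(course_archive) as tar:
    # Every member is vetted for path traversal, unsafe links and device
    # files before anything is written to disk.
    safetar_extractall(tar, target_dir)
```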
zchking/odoo | addons/base_report_designer/plugin/openerp_report_designer/test/test_fields.py | 391 | 1308 |

#
# Use this module to retrieve the fields you need according to the type
# of the OpenOffice operation:
# * Insert a Field
# * Insert a RepeatIn
#
import xmlrpclib
import time
sock = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/object')
def get(object, level=3, ending=None, ending_excl=None, recur=None, root=''):
if ending is None:
ending = []
if ending_excl is None:
ending_excl = []
if recur is None:
recur = []
res = sock.execute('terp', 3, 'admin', 'account.invoice', 'fields_get')
key = res.keys()
key.sort()
for k in key:
if (not ending or res[k]['type'] in ending) and ((not ending_excl) or not (res[k]['type'] in ending_excl)):
print root+'/'+k
if res[k]['type'] in recur:
print root+'/'+k
if (res[k]['type'] in recur) and (level>0):
get(res[k]['relation'], level-1, ending, ending_excl, recur, root+'/'+k)
print 'Field selection for a field', '='*40
get('account.invoice', level=0, ending_excl=['one2many','many2one','many2many','reference'], recur=['many2one'])
print
print 'Field selection for a repeatIn', '='*40
get('account.invoice', level=0, ending=['one2many','many2many'], recur=['many2one'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
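For reference, the positional arguments to the XML-RPC `execute()` call used above are database name, user id, password, model, method, and then the method's own arguments, so the script queries `fields_get` on `account.invoice` as uid 3 against the `terp` database. A small sketch against a hypothetical local server with those same credentials:

```python
import xmlrpclib

sock = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/object')
# database, uid, password, model, method, *method_args
fields = sock.execute('terp', 3, 'admin', 'account.invoice', 'fields_get')
print sorted(fields.keys())[:5]
```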
EliteTK/PyBot | Modules/requests/packages/urllib3/util/retry.py | 699 | 9924 |

import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.MAX_BACKOFF`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
# status_forcelist and the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
| gpl-3.0 |
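A short sketch of how a `Retry` object like the one above is typically consumed, matching the class's own docstring examples (standalone urllib3 is assumed here; in the vendored copy above the import prefix would be `requests.packages.urllib3`, and the URL is a placeholder). With `backoff_factor=0.1`, `get_backoff_time()` returns `0.1 * 2 ** (observed_errors - 1)` once more than one error has been observed, capped at `BACKOFF_MAX` (120 seconds):

```python
import urllib3
from urllib3.util.retry import Retry

# Retry connection errors up to 3 times, read errors twice, 5 retries overall,
# and force a retry on a few 5xx status codes.
retries = Retry(total=5, connect=3, read=2, redirect=5,
                backoff_factor=0.1, status_forcelist=[500, 502, 503])
http = urllib3.PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
```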
samthor/intellij-community | python/lib/Lib/_strptime.py | 90 | 19538 |

"""Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import date as datetime_date
try:
from thread import allocate_lock as _thread_allocate_lock
except:
from dummy_thread import allocate_lock as _thread_allocate_lock
__author__ = "Brett Cannon"
__email__ = "[email protected]"
__all__ = ['strptime']
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
The locale language is set at the offset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
Only other possible issue is if someone changed the timezone and did
not call time.tzset(). That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError("locale changed during initialization")
def __pad(self, seq, front):
# Add '' to seq to either the front (is True), else the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (01,22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
# If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
# 2005-01-03 occurs before the first Monday of the year. Otherwise
# %U is used.
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
# Do not worry about possibility of time.tzname[0] == timetzname[1]
# and time.daylight; handle that in strptime .
try:
time.tzset()
except AttributeError:
pass
no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
if time.daylight:
has_saving = frozenset([time.tzname[1].lower()])
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Create keys/values.
Order of execution is important for dependency reasons.
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
base = super(TimeRE, self)
base.__init__({
# The " \d" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
#XXX: Does 'Y' need to worry about having less or more than
# 4 digits?
'Y': r"(?P<Y>\d\d\d\d)",
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
'%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
prevents the possibility of a match occurring for a value that is also
a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
"""
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
if value != '':
break
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax. Cannot use re.escape since we have to deal with
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile('\s+')
format = whitespace_replacement.sub('\s+', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE)
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
# Need to watch out for a week 0 (when the first day of the year is not
# the same as that specified by %U or %W).
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
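# Worked example (illustrative, not part of the original module): parsing
# "1999 01 0" with "%Y %U %w" asks for the Sunday of week 1 under the
# Sunday-first (%U) convention, so this helper is called with year=1999,
# week_of_year=1, day_of_week=6 and week_starts_Mon=False. Jan 1 1999 is a
# Friday (weekday() == 4); shifting to the Sunday-first view gives
# first_weekday == 5 and day_of_week == 0, week_0_length == (7 - 5) % 7 == 2,
# and the result is 1 + (2 + 7 * 0) + 0 == 3, i.e. January 3rd 1999, which is
# indeed a Sunday.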
def strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input string and the format string."""
global _TimeRE_cache, _regex_cache
_cache_lock.acquire()
try:
if _getlang() != _TimeRE_cache.locale_time.lang:
_TimeRE_cache = TimeRE()
_regex_cache.clear()
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
locale_time = _TimeRE_cache.locale_time
format_regex = _regex_cache.get(format)
if not format_regex:
try:
format_regex = _TimeRE_cache.compile(format)
# KeyError raised when a bad format is found; can be specified as
# \\, in which case it was a stray % but with a space after it
except KeyError, err:
bad_directive = err.args[0]
if bad_directive == "\\":
bad_directive = "%"
del err
raise ValueError("'%s' is a bad directive in format '%s'" %
(bad_directive, format))
# IndexError only occurs when the format string is "%"
except IndexError:
raise ValueError("stray %% in format '%s'" % format)
_regex_cache[format] = format_regex
finally:
_cache_lock.release()
found = format_regex.match(data_string)
if not found:
raise ValueError("time data did not match format: data=%s fmt=%s" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
year = 1900
month = day = 1
hour = minute = second = 0
tz = -1
# Default to -1 to signify that values not known; not critical to have,
# though
week_of_year = -1
week_of_year_start = -1
# weekday and julian defaulted to -1 so as to signal need to calculate
# values
weekday = julian = -1
found_dict = found.groupdict()
for group_key in found_dict.iterkeys():
# Directives not explicitly handled below:
# c, x, X
# handled by making out of other directives
# U, W
# worthless without day of the week
if group_key == 'y':
year = int(found_dict['y'])
# Open Group specification for strptime() states that a %y
#value in the range of [00, 68] is in the century 2000, while
#[69,99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0]):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
# U starts week on Sunday.
week_of_year_start = 6
else:
# W starts week on Monday.
week_of_year_start = 0
elif group_key == 'Z':
# Since -1 is default value only need to worry about setting tz if
# it can be something other than -1.
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
# Deal with bad locale setup where timezone names are the
# same and yet time.daylight is true; too ambiguous to
# be able to tell what timezone has daylight savings
if (time.tzname[0] == time.tzname[1] and
time.daylight and found_zone not in ("utc", "gmt")):
break
else:
tz = value
break
# If we know the week of the year and what day of that week, we can figure
# out the Julian day of the year.
if julian == -1 and week_of_year != -1 and weekday != -1:
week_starts_Mon = True if week_of_year_start == 0 else False
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
week_starts_Mon)
# Cannot pre-calculate datetime_date() since can change in Julian
# calculation and thus could have different value for the day of the week
# calculation.
if julian == -1:
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day it will
# be accurate.
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
return time.struct_time((year, month, day,
hour, minute, second,
weekday, julian, tz))
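# Illustrative use of strptime() (added sketch; this module mirrors the
# standard library's time.strptime, so the behaviour shown is the documented
# one rather than anything new):
#     >>> strptime("30 Nov 00", "%d %b %y")
#     time.struct_time(tm_year=2000, tm_mon=11, tm_mday=30, tm_hour=0,
#                      tm_min=0, tm_sec=0, tm_wday=3, tm_yday=335, tm_isdst=-1)
# An unknown directive such as "%Q" raises ValueError ("'Q' is a bad directive
# in format '%Q'"), and trailing unconverted text raises ValueError as well.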
| apache-2.0 |
vitorio/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/gdata/tlslite/api.py | 359 | 2965 | """Import this module for easy access to TLS Lite objects.
The TLS Lite API consists of classes, functions, and variables spread
throughout this package. Instead of importing them individually with::
from tlslite.TLSConnection import TLSConnection
from tlslite.HandshakeSettings import HandshakeSettings
from tlslite.errors import *
.
.
It's easier to do::
from tlslite.api import *
This imports all the important objects (TLSConnection, Checker,
HandshakeSettings, etc.) into the global namespace. In particular, it
imports::
from constants import AlertLevel, AlertDescription, Fault
from errors import *
from Checker import Checker
from HandshakeSettings import HandshakeSettings
from Session import Session
from SessionCache import SessionCache
from SharedKeyDB import SharedKeyDB
from TLSConnection import TLSConnection
from VerifierDB import VerifierDB
from X509 import X509
from X509CertChain import X509CertChain
from integration.HTTPTLSConnection import HTTPTLSConnection
from integration.POP3_TLS import POP3_TLS
from integration.IMAP4_TLS import IMAP4_TLS
from integration.SMTP_TLS import SMTP_TLS
from integration.XMLRPCTransport import XMLRPCTransport
from integration.TLSSocketServerMixIn import TLSSocketServerMixIn
from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn
from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper
from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded,
gmpyLoaded, pycryptoLoaded, prngName
from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey,
parseAsPublicKey, parsePrivateKey
"""
from constants import AlertLevel, AlertDescription, Fault
from errors import *
from Checker import Checker
from HandshakeSettings import HandshakeSettings
from Session import Session
from SessionCache import SessionCache
from SharedKeyDB import SharedKeyDB
from TLSConnection import TLSConnection
from VerifierDB import VerifierDB
from X509 import X509
from X509CertChain import X509CertChain
from integration.HTTPTLSConnection import HTTPTLSConnection
from integration.TLSSocketServerMixIn import TLSSocketServerMixIn
from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn
from integration.POP3_TLS import POP3_TLS
from integration.IMAP4_TLS import IMAP4_TLS
from integration.SMTP_TLS import SMTP_TLS
from integration.XMLRPCTransport import XMLRPCTransport
try:
import twisted
del(twisted)
from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper
except ImportError:
pass
from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, gmpyLoaded, \
pycryptoLoaded, prngName
from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, \
parseAsPublicKey, parsePrivateKey
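# Minimal usage sketch (added; not part of the original module).  Only names
# imported above are used, but the handshake method and its signature are an
# assumption based on the tlslite documentation, so treat this as a sketch:
#
#     from tlslite.api import *
#     settings = HandshakeSettings()
#     connection = TLSConnection(sock)                     # sock: a connected socket
#     connection.handshakeClientCert(settings=settings)    # assumed API variant
#     connection.send("hello")
#     connection.close()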
| apache-2.0 |
kitefishlabs/CorpusDB2 | corpusdb2/nodegraph.py | 1 | 6336 | # nodegraph.py - nodegraphs
# CorpusDB2 - Corpus-based processing for audio.
"""
Graph of Nodes.
Nodes encapsulate audio processsing.
1:M relationship to source file (optional).
1:1 relationship to (potential) DataCollections.
"""
__version__ = '1.0'
__author__ = 'Thomas Stoll'
__copyright__ = "Copyright (C) 2014 Thomas Stoll, Kitefish Labs, All Rights Reserved"
__license__ = "gpl 2.0 or higher"
__email__ = '[email protected]'
import os, json
import numpy as np
from bregman.features import LinearFrequencySpectrum, LogFrequencySpectrum, MelFrequencySpectrum, MelFrequencyCepstrum, Chromagram, dBPower
from scikits.audiolab import wavread
# DEFAULT_IMAGESC_KWARGS={'origin':'upper', 'cmap':P.cm.hot, 'aspect':'auto', 'interpolation':'nearest'}
"""
These are the default params/metadata for the feature extractors:
{
'rootpath' : '~/comp/corpusdb2/fulltest/',
'filename' : 'cage.wav',
'feature' : LinearFrequencySpectrum,
'sr': 44100, # The audio sample rate
'nbpo': 12, # Number of Bands Per Octave for front-end filterbank
'ncoef' : 10, # Number of cepstral coefficients to use for cepstral features
'lcoef' : 1, # Starting cepstral coefficient
'lo': 62.5, # Lowest band edge frequency of filterbank
'hi': 16000, # Highest band edge frequency of filterbank
'nfft': 16384, # FFT length for filterbank
'wfft': 8192, # FFT signal window length
'nhop': 4410, # FFT hop size
'window' : 'hamm', # FFT window type
'log10': False, # Whether to use log output
'magnitude': True, # Whether to use magnitude (False=power)
'power_ext': ".power", # File extension for power files
'intensify' : False, # Whether to use critical band masking in chroma extraction
'verbosity' : 1, # How much to tell the user about extraction
'available_features' : [
LinearFrequencySpectrum, # all 6 available Bregman features
LogFrequencySpectrum,
MelFrequencySpectrum,
MelFrequencyCepstrum,
Chromagram,
dBPower]
}
"""
class BregmanNodeGraph(object):
"""
Based on Features class of Bregman Audio Toolkit.
"""
rawaudio = None
sr = 0
fmt = ''
X = None
# _feature = None
def __init__(self, arg=None, metadata=None):
self._initialize(metadata)
def _initialize(self, metadata):
"""
Initialize important parameters
"""
self.metadata = self.default_metadata()
self._check_metadata(metadata)
@staticmethod
def default_metadata():
""" metadata == analysis params + available features """
metadata = {
'rootpath' : '~/comp/corpusdb2/fulltest/',
'filename' : 'cage.wav',
'feature' : LinearFrequencySpectrum,
'sr': 44100,
'fmt' : 'pcm16',
'nbpo': 12,
'ncoef' : 10,
'lcoef' : 1,
'lo': 62.5,
'hi': 16000,
'nfft': 16384,
'wfft': 8192,
'nhop': 4410,
'window' : 'hamm',
'intensify' : False,
'verbosity' : 0,
'available_features' : {
'LinearFrequencySpectrum' : '.linfreqspeq',
'LogFrequencySpectrum' : '.logfreqspeq',
'MelFrequencySpectrum' : '.melfreqspeq',
'MelFrequencyCepstrum' : '.mfcc',
                'Chromagram' : '.chroma',   # keyed by class name for __class__.__name__ lookups
'dBPower' : '.dbp'
}
}
return metadata
def _check_metadata(self, metadata=None):
self.metadata = metadata if metadata is not None else self.metadata
md = self.default_metadata()
for k in md.keys():
self.metadata[k] = self.metadata.get(k, md[k])
self.__setattr__(k, self.metadata[k])
return self.metadata
def __repr__(self):
return "%s | %s | %s" % (self.rootpath, self.filename, self.feature)
def _read_wav_file(self):
"""
Simply read raw audio data into class var.
"""
fullsndpath = os.path.join(os.path.expanduser(self.rootpath), 'snd', self.filename)
try:
self.rawaudio, self.sr, self.fmt = wavread(fullsndpath)
except IOError:
return "IOError! WAV read failed!"
return self.rawaudio
def get_full_ngpath(self, mflag=False, alt=None):
# basename, just in case?
dir = 'ng'
if alt is not None:
dir = str(alt)
filename = os.path.basename(self.filename)
extstring = self.available_features[self.feature.__class__.__name__] # well aren't we clever
# print 'dir: ', dir
if mflag:
extstring += ".json"
return os.path.join(
os.path.expanduser(self.rootpath),
dir,
(str(filename)+extstring))
def process_wav_file(self, filename=None, ftr=None):
# filename != None means the file name was passed in as an arg
if filename is not None:
            # metadata is a dict; also keep the attribute used by _read_wav_file() in sync
            self.filename = self.metadata['filename'] = os.path.basename(filename)
self._read_wav_file()
# ftr != None means the feature name was passed in as an arg
if ftr is None:
ftr = self.feature
if self.rawaudio is not None:
# ftr is a class
self.feature = ftr(self.rawaudio)
self.X = self.feature.X
self.dims = np.shape(self.X)
extstring = self.available_features[self.feature.__class__.__name__] # well aren't we clever
md_filepath = self.get_full_ngpath(mflag=True)
clean_md = self.metadata
clean_md['feature'] = clean_md['feature'].__name__
j = json.dumps(self.metadata, indent=4)
f = open(md_filepath, 'w')
print >> f, j
f.close()
return self.X, self.dims
else:
return None
"""
if type(arg)==P.ndarray:
self.set_audio(arg, sr=self.sr)
self.extract()
elif type(arg)==str:
if arg:
self.load_audio(arg) # open file as MONO signal
self.extract()
""" | gpl-3.0 |
blackbliss/callme | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/langthaimodel.py | 2930 | 11275 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
| mit |
hynekcer/django | tests/admin_inlines/models.py | 276 | 6824 | """
Testing of admin inline formsets.
"""
from __future__ import unicode_literals
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return 'I am %s, a child of %s' % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
class NonAutoPKBook(models.Model):
rand_pk = models.IntegerField(primary_key=True, editable=False)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
def save(self, *args, **kwargs):
while not self.rand_pk:
test_pk = random.randint(1, 99999)
if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
self.rand_pk = test_pk
super(NonAutoPKBook, self).save(*args, **kwargs)
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return '/inner/'
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
poll = models.ForeignKey(Poll, models.CASCADE)
class Novel(models.Model):
name = models.CharField(max_length=40)
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text='Help text for Consigliere')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model1/'
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model2/'
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
# Other models
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
| bsd-3-clause |
giacomov/3ML | threeML/utils/spectrum/pha_spectrum.py | 1 | 34989 | from __future__ import division
from builtins import range
from past.utils import old_div
import collections
import astropy.io.fits as fits
import numpy as np
import os
import warnings
import six
from threeML.io.progress_bar import progress_bar
from threeML.utils.OGIP.response import OGIPResponse, InstrumentResponse
from threeML.utils.OGIP.pha import PHAII
from threeML.utils.spectrum.binned_spectrum import BinnedSpectrumWithDispersion, Quality
from threeML.utils.spectrum.binned_spectrum_set import BinnedSpectrumSet
from threeML.utils.time_interval import TimeIntervalSet
_required_keywords = {}
_required_keywords["observed"] = (
"mission:TELESCOP,instrument:INSTRUME,filter:FILTER,"
+ "exposure:EXPOSURE,backfile:BACKFILE,"
+ "respfile:RESPFILE,"
+ "ancrfile:ANCRFILE,hduclass:HDUCLASS,"
+ "hduclas1:HDUCLAS1,poisserr:POISSERR,"
+ "chantype:CHANTYPE,detchans:DETCHANS,"
"backscal:BACKSCAL"
).split(",")
# python types, not fits
_required_keyword_types = {"POISSERR": bool}
# hduvers:HDUVERS
_required_keywords["background"] = (
"mission:TELESCOP,instrument:INSTRUME,filter:FILTER,"
+ "exposure:EXPOSURE,"
+ "hduclass:HDUCLASS,"
+ "hduclas1:HDUCLAS1,poisserr:POISSERR,"
+ "chantype:CHANTYPE,detchans:DETCHANS,"
"backscal:BACKSCAL"
).split(",")
# hduvers:HDUVERS
_might_be_columns = {}
_might_be_columns["observed"] = (
"EXPOSURE,BACKFILE," + "CORRFILE,CORRSCAL," + "RESPFILE,ANCRFILE," "BACKSCAL"
).split(",")
_might_be_columns["background"] = ("EXPOSURE,BACKSCAL").split(",")
def _read_pha_or_pha2_file(
pha_file_or_instance,
spectrum_number=None,
file_type="observed",
rsp_file=None,
arf_file=None,
treat_as_time_series=False,
):
"""
A function to extract information from pha and pha2 files. It is kept separate because the same method is
used for reading time series (MUCH faster than building a lot of individual spectra) and single spectra.
:param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
:param spectrum_number: (optional) the spectrum number of the TypeII file to be used
:param file_type: observed or background
:param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
    :param arf_file: (optional) an ARF filename
:param treat_as_time_series:
:return:
"""
assert isinstance(pha_file_or_instance, six.string_types) or isinstance(
pha_file_or_instance, PHAII
), "Must provide a FITS file name or PHAII instance"
if isinstance(pha_file_or_instance, six.string_types):
ext = os.path.splitext(pha_file_or_instance)[-1]
if "{" in ext:
spectrum_number = int(ext.split("{")[-1].replace("}", ""))
pha_file_or_instance = pha_file_or_instance.split("{")[0]
# Read the data
filename = pha_file_or_instance
# create a FITS_FILE instance
pha_file_or_instance = PHAII.from_fits_file(pha_file_or_instance)
# If this is already a FITS_FILE instance,
elif isinstance(pha_file_or_instance, PHAII):
# we simply create a dummy filename
filename = "pha_instance"
else:
raise RuntimeError("This is a bug")
file_name = filename
assert file_type.lower() in [
"observed",
"background",
], "Unrecognized filetype keyword value"
file_type = file_type.lower()
try:
HDUidx = pha_file_or_instance.index_of("SPECTRUM")
except:
raise RuntimeError(
"The input file %s is not in PHA format" % (pha_file_or_instance)
)
# spectrum_number = spectrum_number
spectrum = pha_file_or_instance[HDUidx]
data = spectrum.data
header = spectrum.header
# We don't support yet the rescaling
if "CORRFILE" in header:
if (header.get("CORRFILE").upper().strip() != "NONE") and (
header.get("CORRFILE").upper().strip() != ""
):
raise RuntimeError("CORRFILE is not yet supported")
# See if there is there is a QUALITY==0 in the header
if "QUALITY" in header:
has_quality_column = False
if header["QUALITY"] == 0:
is_all_data_good = True
else:
is_all_data_good = False
else:
if "QUALITY" in data.columns.names:
has_quality_column = True
is_all_data_good = False
else:
has_quality_column = False
is_all_data_good = True
warnings.warn(
"Could not find QUALITY in columns or header of PHA file. This is not a valid OGIP file. Assuming QUALITY =0 (good)"
)
# looking for tstart and tstop
tstart = None
tstop = None
has_tstart = False
has_tstop = False
has_telapse = False
if "TSTART" in header:
has_tstart_column = False
has_tstart = True
else:
if "TSTART" in data.columns.names:
has_tstart_column = True
has_tstart = True
if "TELAPSE" in header:
has_telapse_column = False
has_telapse = True
else:
if "TELAPSE" in data.columns.names:
has_telapse_column = True
has_telapse = True
if "TSTOP" in header:
has_tstop_column = False
has_tstop = True
else:
if "TSTOP" in data.columns.names:
has_tstop_column = True
has_tstop = True
if has_tstop and has_telapse:
warnings.warn("Found TSTOP and TELAPSE. This file is invalid. Using TSTOP.")
has_telapse = False
# Determine if this file contains COUNTS or RATES
if "COUNTS" in data.columns.names:
has_rates = False
data_column_name = "COUNTS"
elif "RATE" in data.columns.names:
has_rates = True
data_column_name = "RATE"
else:
raise RuntimeError(
"This file does not contain a RATE nor a COUNTS column. "
"This is not a valid PHA file"
)
# Determine if this is a PHA I or PHA II
if len(data.field(data_column_name).shape) == 2:
typeII = True
if spectrum_number == None and not treat_as_time_series:
raise RuntimeError(
"This is a PHA Type II file. You have to provide a spectrum number"
)
else:
typeII = False
# Collect information from mandatory keywords
keys = _required_keywords[file_type]
gathered_keywords = {}
for k in keys:
internal_name, keyname = k.split(":")
key_has_been_collected = False
if keyname in header:
if (
keyname in _required_keyword_types
and type(header.get(keyname)) is not _required_keyword_types[keyname]
):
warnings.warn(
"unexpected type of %(keyname)s, expected %(expected_type)s\n found %(found_type)s: %(found_value)s"
% dict(
keyname=keyname,
expected_type=_required_keyword_types[keyname],
found_type=type(header.get(keyname)),
found_value=header.get(keyname),
)
)
else:
gathered_keywords[internal_name] = header.get(keyname)
# Fix "NONE" in None
if (
gathered_keywords[internal_name] == "NONE"
or gathered_keywords[internal_name] == "none"
):
gathered_keywords[internal_name] = None
key_has_been_collected = True
# Note that we check again because the content of the column can override the content of the header
if keyname in _might_be_columns[file_type] and typeII:
# Check if there is a column with this name
if keyname in data.columns.names:
# This will set the exposure, among other things
if not treat_as_time_series:
# if we just want a single spectrum
gathered_keywords[internal_name] = data[keyname][
spectrum_number - 1
]
else:
# else get all the columns
gathered_keywords[internal_name] = data[keyname]
# Fix "NONE" in None
if (
gathered_keywords[internal_name] == "NONE"
or gathered_keywords[internal_name] == "none"
):
gathered_keywords[internal_name] = None
key_has_been_collected = True
if not key_has_been_collected:
# The keyword POISSERR is a special case, because even if it is missing,
# it is assumed to be False if there is a STAT_ERR column in the file
if keyname == "POISSERR" and "STAT_ERR" in data.columns.names:
warnings.warn(
"POISSERR is not set. Assuming non-poisson errors as given in the "
"STAT_ERR column"
)
gathered_keywords["poisserr"] = False
elif keyname == "ANCRFILE":
# Some non-compliant files have no ARF because they don't need one. Don't fail, but issue a
# warning
warnings.warn(
"ANCRFILE is not set. This is not a compliant OGIP file. Assuming no ARF."
)
gathered_keywords["ancrfile"] = None
elif keyname == "FILTER":
# Some non-compliant files have no FILTER because they don't need one. Don't fail, but issue a
# warning
warnings.warn(
"FILTER is not set. This is not a compliant OGIP file. Assuming no FILTER."
)
gathered_keywords["filter"] = None
else:
raise RuntimeError(
"Keyword %s not found. File %s is not a proper PHA "
"file" % (keyname, filename)
)
is_poisson = gathered_keywords["poisserr"]
exposure = gathered_keywords["exposure"]
# now we need to get the response file so that we can extract the EBOUNDS
if file_type == "observed":
if rsp_file is None:
# this means it should be specified in the header
rsp_file = gathered_keywords["respfile"]
if arf_file is None:
arf_file = gathered_keywords["ancrfile"]
# Read in the response
if isinstance(rsp_file, six.string_types) or isinstance(rsp_file, str):
rsp = OGIPResponse(rsp_file, arf_file=arf_file)
else:
# assume a fully formed OGIPResponse
rsp = rsp_file
if file_type == "background":
# we need the rsp ebounds from response to build the histogram
assert isinstance(
rsp_file, InstrumentResponse
), "You must supply and OGIPResponse to extract the energy bounds"
rsp = rsp_file
# Now get the data (counts or rates) and their errors. If counts, transform them in rates
if typeII:
# PHA II file
if has_rates:
if not treat_as_time_series:
rates = data.field(data_column_name)[spectrum_number - 1, :]
rate_errors = None
if not is_poisson:
rate_errors = data.field("STAT_ERR")[spectrum_number - 1, :]
else:
rates = data.field(data_column_name)
rate_errors = None
if not is_poisson:
rate_errors = data.field("STAT_ERR")
else:
if not treat_as_time_series:
rates = old_div(
data.field(data_column_name)[spectrum_number - 1, :], exposure
)
rate_errors = None
if not is_poisson:
rate_errors = old_div(
data.field("STAT_ERR")[spectrum_number - 1, :], exposure
)
else:
rates = old_div(data.field(data_column_name), np.atleast_2d(exposure).T)
rate_errors = None
if not is_poisson:
rate_errors = old_div(
data.field("STAT_ERR"), np.atleast_2d(exposure).T
)
if "SYS_ERR" in data.columns.names:
if not treat_as_time_series:
sys_errors = data.field("SYS_ERR")[spectrum_number - 1, :]
else:
sys_errors = data.field("SYS_ERR")
else:
sys_errors = np.zeros(rates.shape)
if has_quality_column:
if not treat_as_time_series:
try:
quality = data.field("QUALITY")[spectrum_number - 1, :]
except (IndexError):
# GBM CSPEC files do not follow OGIP conventions and instead
# list simply QUALITY=0 for each spectrum
# so we have to read them differently
quality_element = data.field("QUALITY")[spectrum_number - 1]
warnings.warn(
"The QUALITY column has the wrong shape. This PHAII file does not follow OGIP standards"
)
if quality_element == 0:
quality = np.zeros_like(rates, dtype=int)
else:
quality = np.zeros_like(rates, dtype=int) + 5
else:
# we need to be careful again because the QUALITY column is not always the correct shape
quality_element = data.field("QUALITY")
if quality_element.shape == rates.shape:
# This is the proper way for the quality to be stored
quality = quality_element
else:
quality = np.zeros_like(rates, dtype=int)
for i, q in enumerate(quality_element):
if q != 0:
quality[i, :] = 5
else:
if is_all_data_good:
quality = np.zeros_like(rates, dtype=int)
else:
quality = np.zeros_like(rates, dtype=int) + 5
if has_tstart:
if has_tstart_column:
if not treat_as_time_series:
tstart = data.field("TSTART")[spectrum_number - 1]
else:
tstart = data.field("TSTART")
if has_tstop:
if has_tstop_column:
if not treat_as_time_series:
tstop = data.field("TSTOP")[spectrum_number - 1]
else:
tstop = data.field("TSTOP")
if has_telapse:
if has_telapse_column:
if not treat_as_time_series:
tstop = tstart + data.field("TELAPSE")[spectrum_number - 1]
else:
tstop = tstart + data.field("TELAPSE")
elif typeII == False:
assert (
not treat_as_time_series
), "This is not a PHAII file but you specified to treat it as a time series"
# PHA 1 file
if has_rates:
rates = data.field(data_column_name)
rate_errors = None
if not is_poisson:
rate_errors = data.field("STAT_ERR")
else:
rates = old_div(data.field(data_column_name), exposure)
rate_errors = None
if not is_poisson:
rate_errors = old_div(data.field("STAT_ERR"), exposure)
if "SYS_ERR" in data.columns.names:
sys_errors = data.field("SYS_ERR")
else:
sys_errors = np.zeros(rates.shape)
if has_quality_column:
quality = data.field("QUALITY")
else:
if is_all_data_good:
quality = np.zeros_like(rates, dtype=int)
else:
quality = np.zeros_like(rates, dtype=int) + 5
# read start and stop times if needed
if has_tstart:
if has_tstart_column:
tstart = data.field("TSTART")
else:
tstart = header["TSTART"]
if has_tstop:
if has_tstop_column:
tstop = data.field("TSTOP")
else:
tstop = header["TSTOP"]
if has_telapse:
if has_telapse_column:
tstop = tstart + data.field("TELAPSE")
else:
tstop = tstart + header["TELAPSE"]
# Now that we have read it, some safety checks
assert rates.shape[0] == gathered_keywords["detchans"], (
"The data column (RATES or COUNTS) has a different number of entries than the "
"DETCHANS declared in the header"
)
quality = Quality.from_ogip(quality)
if not treat_as_time_series:
counts = rates * exposure
if not is_poisson:
count_errors = rate_errors * exposure
else:
count_errors = None
else:
exposure = np.atleast_2d(exposure).T
counts = rates * exposure
if not is_poisson:
count_errors = rate_errors * exposure
else:
count_errors = None
out = collections.OrderedDict(
counts=counts,
count_errors=count_errors,
rates=rates,
rate_errors=rate_errors,
sys_errors=sys_errors,
exposure=exposure,
is_poisson=is_poisson,
rsp=rsp,
gathered_keywords=gathered_keywords,
quality=quality,
file_name=file_name,
tstart=tstart,
tstop=tstop,
)
return out
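# Example of how this helper is used by the classes below (added sketch; the
# file names are placeholders):
#
#     info = _read_pha_or_pha2_file("obs.pha{1}", file_type="observed",
#                                   rsp_file="obs.rsp")
#     info["counts"], info["rates"], info["rsp"], info["quality"]
#
# With treat_as_time_series=True the same keys hold one row per spectrum of a
# PHAII file (2-D arrays) instead of a single spectrum.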
class PHASpectrum(BinnedSpectrumWithDispersion):
def __init__(
self,
pha_file_or_instance,
spectrum_number=None,
file_type="observed",
rsp_file=None,
arf_file=None,
):
"""
A spectrum with dispersion build from an OGIP-compliant PHA FITS file. Both Type I & II files can be read. Type II
spectra are selected either by specifying the spectrum_number or via the {spectrum_number} file name convention used
in XSPEC. If the file_type is background, a 3ML InstrumentResponse or subclass must be passed so that the energy
bounds can be obtained.
:param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
:param spectrum_number: (optional) the spectrum number of the TypeII file to be used
:param file_type: observed or background
:param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
        :param arf_file: (optional) an ARF filename
"""
# extract the spectrum number if needed
assert isinstance(pha_file_or_instance, six.string_types) or isinstance(
pha_file_or_instance, PHAII
), "Must provide a FITS file name or PHAII instance"
pha_information = _read_pha_or_pha2_file(
pha_file_or_instance,
spectrum_number,
file_type,
rsp_file,
arf_file,
treat_as_time_series=False,
)
# default the grouping to all open bins
# this will only be altered if the spectrum is rebinned
self._grouping = np.ones_like(pha_information["counts"])
# this saves the extra properties to the class
self._gathered_keywords = pha_information["gathered_keywords"]
self._file_type = file_type
self._file_name = pha_information["file_name"]
# pass the needed spectrum values back up
# remember that Spectrum reads counts, but returns
# rates!
super(PHASpectrum, self).__init__(
counts=pha_information["counts"],
exposure=pha_information["exposure"],
response=pha_information["rsp"],
count_errors=pha_information["count_errors"],
sys_errors=pha_information["sys_errors"],
is_poisson=pha_information["is_poisson"],
quality=pha_information["quality"],
mission=pha_information["gathered_keywords"]["mission"],
instrument=pha_information["gathered_keywords"]["instrument"],
tstart=pha_information["tstart"],
tstop=pha_information["tstop"],
)
def _return_file(self, key):
if key in self._gathered_keywords:
return self._gathered_keywords[key]
else:
return None
def set_ogip_grouping(self, grouping):
"""
If the counts are rebinned, this updates the grouping
:param grouping:
"""
self._grouping = grouping
@property
def filename(self):
return self._file_name
@property
def background_file(self):
"""
        Returns the background file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
back_file = self._return_file('backfile')
if back_file == "":
back_file = None
return back_file
@property
def scale_factor(self):
"""
This is a scale factor (in the BACKSCAL keyword) which must be used to rescale background and source
regions
:return:
"""
return self._gathered_keywords["backscal"]
@property
def response_file(self):
"""
        Returns the response file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
return self._return_file("respfile")
@property
def ancillary_file(self):
"""
        Returns the ancillary file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
return self._return_file("ancrfile")
@property
def grouping(self):
return self._grouping
def clone(
self,
new_counts=None,
new_count_errors=None,
new_exposure=None,
new_scale_factor=None,
):
"""
make a new spectrum with new counts and errors and all other
parameters the same
:param new_exposure: the new exposure for the clone
:param new_scale_factor: the new scale factor for the clone
:param new_counts: new counts for the spectrum
:param new_count_errors: new errors from the spectrum
:return: new pha spectrum
"""
if new_exposure is None:
new_exposure = self.exposure
if new_counts is None:
new_counts = self.counts
new_count_errors = self.count_errors
if new_count_errors is None:
stat_err = None
else:
stat_err = old_div(new_count_errors, new_exposure)
if self._tstart is None:
tstart = 0
else:
tstart = self._tstart
if self._tstop is None:
telapse = new_exposure
else:
telapse = self._tstop - tstart
if new_scale_factor is None:
new_scale_factor = self.scale_factor
# create a new PHAII instance
pha = PHAII(
instrument_name=self.instrument,
telescope_name=self.mission,
tstart=tstart,
telapse=telapse,
channel=list(range(1, len(self) + 1)),
rate=old_div(new_counts, self.exposure),
stat_err=stat_err,
quality=self.quality.to_ogip(),
grouping=self.grouping,
exposure=new_exposure,
backscale=new_scale_factor,
respfile=None,
ancrfile=None,
is_poisson=self.is_poisson,
)
return pha
@classmethod
def from_dispersion_spectrum(cls, dispersion_spectrum, file_type="observed"):
# type: (BinnedSpectrumWithDispersion, str) -> PHASpectrum
if dispersion_spectrum.is_poisson:
rate_errors = None
else:
rate_errors = dispersion_spectrum.rate_errors
if dispersion_spectrum.tstart is None:
tstart = 0
else:
tstart = dispersion_spectrum.tstart
if dispersion_spectrum.tstop is None:
telapse = dispersion_spectrum.exposure
else:
telapse = dispersion_spectrum.tstop - tstart
pha = PHAII(
instrument_name=dispersion_spectrum.instrument,
telescope_name=dispersion_spectrum.mission,
tstart=tstart, # TODO: add this in so that we have proper time!
telapse=telapse,
channel=list(range(1, len(dispersion_spectrum) + 1)),
rate=dispersion_spectrum.rates,
stat_err=rate_errors,
quality=dispersion_spectrum.quality.to_ogip(),
grouping=np.ones(len(dispersion_spectrum)),
exposure=dispersion_spectrum.exposure,
backscale=dispersion_spectrum.scale_factor,
respfile=None,
ancrfile=None,
is_poisson=dispersion_spectrum.is_poisson,
)
return cls(
pha_file_or_instance=pha,
spectrum_number=1,
file_type=file_type,
rsp_file=dispersion_spectrum.response,
)
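# Usage sketch (added; file names are placeholders).  Either give the spectrum
# number explicitly or use the XSPEC-style {n} suffix described in the class
# docstring:
#
#     spec = PHASpectrum("obs.pha{1}", file_type="observed", rsp_file="obs.rsp")
#     spec.exposure, spec.rates, spec.quality     # provided by the base class
#     spec.background_file, spec.scale_factor     # OGIP keywords from the header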
class PHASpectrumSet(BinnedSpectrumSet):
def __init__(
self, pha_file_or_instance, file_type="observed", rsp_file=None, arf_file=None
):
"""
A spectrum with dispersion build from an OGIP-compliant PHA FITS file. Both Type I & II files can be read. Type II
spectra are selected either by specifying the spectrum_number or via the {spectrum_number} file name convention used
in XSPEC. If the file_type is background, a 3ML InstrumentResponse or subclass must be passed so that the energy
bounds can be obtained.
:param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
:param spectrum_number: (optional) the spectrum number of the TypeII file to be used
:param file_type: observed or background
:param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
        :param arf_file: (optional) an ARF filename
"""
# extract the spectrum number if needed
assert isinstance(pha_file_or_instance, six.string_types) or isinstance(
pha_file_or_instance, PHAII
), "Must provide a FITS file name or PHAII instance"
with fits.open(pha_file_or_instance) as f:
try:
HDUidx = f.index_of("SPECTRUM")
except:
raise RuntimeError(
"The input file %s is not in PHA format" % (pha2_file)
)
spectrum = f[HDUidx]
data = spectrum.data
if "COUNTS" in data.columns.names:
has_rates = False
data_column_name = "COUNTS"
elif "RATE" in data.columns.names:
has_rates = True
data_column_name = "RATE"
else:
raise RuntimeError(
"This file does not contain a RATE nor a COUNTS column. "
"This is not a valid PHA file"
)
# Determine if this is a PHA I or PHA II
if len(data.field(data_column_name).shape) == 2:
num_spectra = data.field(data_column_name).shape[0]
else:
raise RuntimeError("This appears to be a PHA I and not PHA II file")
pha_information = _read_pha_or_pha2_file(
pha_file_or_instance,
None,
file_type,
rsp_file,
arf_file,
treat_as_time_series=True,
)
# default the grouping to all open bins
# this will only be altered if the spectrum is rebinned
self._grouping = np.ones_like(pha_information["counts"])
# this saves the extra properties to the class
self._gathered_keywords = pha_information["gathered_keywords"]
        self._file_type = file_type
        # keep the file name so the `filename` property of this class works
        self._file_name = pha_information["file_name"]
# need to see if we have count errors, tstart, tstop
# if not, we create an list of None
if pha_information["count_errors"] is None:
count_errors = [None] * num_spectra
else:
count_errors = pha_information["count_errors"]
if pha_information["tstart"] is None:
tstart = [None] * num_spectra
else:
tstart = pha_information["tstart"]
if pha_information["tstop"] is None:
tstop = [None] * num_spectra
else:
tstop = pha_information["tstop"]
# now build the list of binned spectra
list_of_binned_spectra = []
with progress_bar(num_spectra, title="Loading PHAII spectra") as p:
for i in range(num_spectra):
list_of_binned_spectra.append(
BinnedSpectrumWithDispersion(
counts=pha_information["counts"][i],
exposure=pha_information["exposure"][i, 0],
response=pha_information["rsp"],
count_errors=count_errors[i],
sys_errors=pha_information["sys_errors"][i],
is_poisson=pha_information["is_poisson"],
quality=pha_information["quality"].get_slice(i),
mission=pha_information["gathered_keywords"]["mission"],
instrument=pha_information["gathered_keywords"]["instrument"],
tstart=tstart[i],
tstop=tstop[i],
)
)
p.increase()
# now get the time intervals
start_times = data.field("TIME")
stop_times = data.field("ENDTIME")
time_intervals = TimeIntervalSet.from_starts_and_stops(start_times, stop_times)
reference_time = 0
# see if there is a reference time in the file
if "TRIGTIME" in spectrum.header:
reference_time = spectrum.header["TRIGTIME"]
for t_number in range(spectrum.header["TFIELDS"]):
if "TZERO%d" % t_number in spectrum.header:
reference_time = spectrum.header["TZERO%d" % t_number]
super(PHASpectrumSet, self).__init__(
list_of_binned_spectra,
reference_time=reference_time,
time_intervals=time_intervals,
)
def _return_file(self, key):
if key in self._gathered_keywords:
return self._gathered_keywords[key]
else:
return None
def set_ogip_grouping(self, grouping):
"""
If the counts are rebinned, this updates the grouping
:param grouping:
"""
self._grouping = grouping
@property
def filename(self):
return self._file_name
@property
def background_file(self):
"""
        Returns the background file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
return self._return_file("backfile")
@property
def scale_factor(self):
"""
This is a scale factor (in the BACKSCAL keyword) which must be used to rescale background and source
regions
:return:
"""
return self._gathered_keywords["backscal"]
@property
def response_file(self):
"""
        Returns the response file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
return self._return_file("respfile")
@property
def ancillary_file(self):
"""
        Returns the ancillary file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
return self._return_file("ancrfile")
@property
def grouping(self):
return self._grouping
def clone(
self, new_counts=None, new_count_errors=None,
):
"""
make a new spectrum with new counts and errors and all other
parameters the same
:param new_counts: new counts for the spectrum
:param new_count_errors: new errors from the spectrum
:return: new pha spectrum
"""
if new_counts is None:
new_counts = self.counts
new_count_errors = self.count_errors
if new_count_errors is None:
stat_err = None
else:
stat_err = old_div(new_count_errors, self.exposure)
# create a new PHAII instance
pha = PHAII(
instrument_name=self.instrument,
telescope_name=self.mission,
tstart=0,
telapse=self.exposure,
channel=list(range(1, len(self) + 1)),
rate=old_div(new_counts, self.exposure),
stat_err=stat_err,
quality=self.quality.to_ogip(),
grouping=self.grouping,
exposure=self.exposure,
backscale=self.scale_factor,
respfile=None,
ancrfile=None,
is_poisson=self.is_poisson,
)
return pha
@classmethod
def from_dispersion_spectrum(cls, dispersion_spectrum, file_type="observed"):
# type: (BinnedSpectrumWithDispersion, str) -> PHASpectrum
if dispersion_spectrum.is_poisson:
rate_errors = None
else:
rate_errors = dispersion_spectrum.rate_errors
pha = PHAII(
instrument_name=dispersion_spectrum.instrument,
telescope_name=dispersion_spectrum.mission,
tstart=dispersion_spectrum.tstart,
telapse=dispersion_spectrum.tstop - dispersion_spectrum.tstart,
channel=list(range(1, len(dispersion_spectrum) + 1)),
rate=dispersion_spectrum.rates,
stat_err=rate_errors,
quality=dispersion_spectrum.quality.to_ogip(),
grouping=np.ones(len(dispersion_spectrum)),
exposure=dispersion_spectrum.exposure,
backscale=dispersion_spectrum.scale_factor,
respfile=None,
ancrfile=None,
is_poisson=dispersion_spectrum.is_poisson,
)
return cls(
            pha_file_or_instance=pha,
file_type=file_type,
rsp_file=dispersion_spectrum.response,
)
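# PHASpectrumSet covers a whole PHAII (Type II) file in one go, e.g. (sketch)
# PHASpectrumSet("obs.pha2", rsp_file="obs.rsp"): each row becomes one
# BinnedSpectrumWithDispersion and the set's time intervals come from the
# TIME/ENDTIME columns of the SPECTRUM extension.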
| bsd-3-clause |
pjuu/pjuu | tests/test_parser.py | 1 | 5540 | # -*- coding: utf8 -*-
"""Post backend tests.
:license: AGPL v3, see LICENSE for more details
:copyright: 2014-2021 Joe Doherty
"""
from pjuu.auth.backend import create_account, activate
from pjuu.lib.parser import (parse_hashtags, parse_links, parse_mentions,
parse_post)
from tests import BackendTestCase
class ParserTests(BackendTestCase):
"""Ensure the text parser, parses correctly."""
def test_simple_url_http(self):
"""Simple HTTP urls"""
links = parse_links('Hello http://pjuu.com')
self.assertEqual(links[0]['link'], 'http://pjuu.com')
def test_simple_url_https(self):
"""Simpe HTTPS urls"""
links = parse_links('Hello https://pjuu.com')
self.assertEqual(links[0]['link'], 'https://pjuu.com')
def test_urls_are_fixed(self):
"""Ensure simple link are fixed up."""
links = parse_links('Hello pjuu.com')
self.assertEqual(links[0]['link'], 'http://pjuu.com')
self.assertEqual(links[0]['span'], (6, 14))
def test_anchors_in_urls(self):
"""Query strings and anchor points"""
links = parse_links('https://pjuu.com/joe?page=2#hello')
self.assertEqual(links[0]['link'], 'https://pjuu.com/joe?page=2#hello')
def test_weird_query_strings(self):
"""Ensure strange characters are handled"""
links = parse_links(
'http://pjuu.com:5000/a/post/url?page=1&q=abc,def#something')
self.assertEqual(
links[0]['link'],
'http://pjuu.com:5000/a/post/url?page=1&q=abc,def#something')
def test_hashtags_are_not_parsed(self):
"""Ensure achors are not parsed as hashtags"""
hashtags = parse_hashtags(
'http://pjuu.com:5000/a/post/url?page=1&q=abc,def#something')
self.assertEqual(len(hashtags), 0)
def test_urls_and_hashtags(self):
"""Hashtags intermixed with urls"""
links, mentions, hashtags = parse_post('pjuu.com/#bottom #plop')
self.assertEqual(links[0]['link'], 'http://pjuu.com/#bottom')
self.assertEqual(hashtags[0]['hashtag'], 'plop')
def test_short_hashtags(self):
"""Hashtags musy be more than 1 character long."""
hashtags = parse_hashtags('#cheese #j #jo #joe')
self.assertEqual(hashtags[0]['hashtag'], 'cheese')
self.assertEqual(hashtags[1]['hashtag'], 'jo')
self.assertEqual(hashtags[2]['hashtag'], 'joe')
def test_mention_no_user(self):
"""Find a user mention (doens't exist)"""
mentions = parse_mentions('@joe @ant', check_user=False)
self.assertEqual(mentions[0]['username'], 'joe')
self.assertEqual(mentions[0]['user_id'], 'NA')
self.assertEqual(mentions[0]['span'], (0, 4))
self.assertEqual(mentions[1]['username'], 'ant')
self.assertEqual(mentions[1]['user_id'], 'NA')
self.assertEqual(mentions[1]['span'], (5, 9))
def test_mention_real_user(self):
"""Find a user mentions (user does exist)"""
user1 = create_account('user1', '[email protected]', 'Password1')
activate(user1)
mentions = parse_mentions('@user1 @user2')
self.assertEqual(len(mentions), 1)
self.assertEqual(mentions[0]['username'], 'user1')
self.assertEqual(mentions[0]['user_id'], user1)
self.assertEqual(mentions[0]['span'], (0, 6))
def test_unicode_character(self):
"""Do unicode characters break things."""
user1 = create_account('user1', '[email protected]', 'Password1')
activate(user1)
links, mentions, hashtags = parse_post('၍ @user1, ☂pjuu.com, 㒅 #hash')
self.assertEqual(links[0]['link'], 'http://pjuu.com')
self.assertEqual(mentions[0]['username'], 'user1')
self.assertEqual(hashtags[0]['hashtag'], 'hash')
def test_surrounding_characters(self):
"""Can parse objects be in parenthesis"""
user1 = create_account('user1', '[email protected]', 'Password1')
activate(user1)
links, mentions, hashtags = parse_post('(@user1), (pjuu.com), (#hash)')
self.assertEqual(links[0]['link'], 'http://pjuu.com')
self.assertEqual(mentions[0]['username'], 'user1')
self.assertEqual(hashtags[0]['hashtag'], 'hash')
    def test_parenthesis_in_paths(self):
        """Handle URLs surrounded by parentheses and containing them."""
links = parse_links('(https://pjuu.com/user1)')
self.assertEqual(links[0]['link'], 'https://pjuu.com/user1')
links = parse_links('https://pjuu.com/user1(awesome)')
self.assertEqual(links[0]['link'], 'https://pjuu.com/user1(awesome)')
def test_quoting_mentions_hashtags(self):
"""Parenthesis around items"""
links = parse_links('"https://pjuu.com/user1"')
self.assertEqual(links[0]['link'], 'https://pjuu.com/user1')
hashtags = parse_hashtags('"#pjuu"')
self.assertEqual(hashtags[0]['hashtag'], 'pjuu')
mentions = parse_mentions('"@joe"', check_user=False)
self.assertEqual(mentions[0]['username'], 'joe')
def test_delimited(self):
"""Ensure hashtags can be delimited"""
hashtags = parse_hashtags('#pjuu\'s test')
self.assertEqual(hashtags[0]['hashtag'], 'pjuu')
user1 = create_account('user1', '[email protected]', 'Password1')
activate(user1)
mentions = parse_mentions('@user1\'s')
self.assertEqual(mentions[0]['username'], 'user1')
self.assertEqual(mentions[0]['user_id'], user1)
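# Illustrative note (not part of the original test module): parse_post() returns
# three lists -- links, mentions and hashtags -- whose entries are dicts holding
# the matched text plus its character span, e.g. (the spans shown are assumptions):
#
#     links, mentions, hashtags = parse_post('pjuu.com #tag @joe')
#     # links[0]    -> {'link': 'http://pjuu.com', 'span': (0, 8)}
#     # hashtags[0] -> {'hashtag': 'tag', 'span': (9, 13)}
#     # mentions[0] -> {'username': 'joe', 'user_id': 'NA' or the user id, 'span': (14, 18)}
#
# The assertions in the tests above remain the authoritative reference.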
| agpl-3.0 |
hunch/hunch-gift-app | django/contrib/gis/db/backends/postgis/creation.py | 12 | 2905 | from django.conf import settings
from django.db.backends.postgresql.creation import DatabaseCreation
class PostGISCreation(DatabaseCreation):
geom_index_type = 'GIST'
geom_index_opts = 'GIST_GEOMETRY_OPS'
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
if f.geography:
                # Geography columns are created normally.
pass
else:
# Geometry columns are created by `AddGeometryColumn`
# stored procedure.
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ');')
if not f.null:
# Add a NOT NULL constraint to the field
output.append(style.SQL_KEYWORD('ALTER TABLE ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' ALTER ') +
style.SQL_FIELD(qn(f.column)) +
style.SQL_KEYWORD(' SET NOT NULL') + ';')
if f.spatial_index:
# Spatial indexes created the same way for both Geometry and
# Geography columns
if f.geography:
index_opts = ''
else:
index_opts = ' ' + style.SQL_KEYWORD(self.geom_index_opts)
output.append(style.SQL_KEYWORD('CREATE INDEX ') +
style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
style.SQL_KEYWORD(' ON ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' USING ') +
style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
style.SQL_FIELD(qn(f.column)) + index_opts + ' );')
return output
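    # Illustrative output of sql_indexes_for_field() for a hypothetical NOT NULL
    # geometry field 'geom' (POINT, SRID 4326, 2D) on a table 'places' with a
    # spatial index (table and column names are made up for the example):
    #
    #   SELECT AddGeometryColumn('places', 'geom', 4326, 'POINT', 2);
    #   ALTER TABLE "places" ALTER "geom" SET NOT NULL;
    #   CREATE INDEX "places_geom_id" ON "places" USING GIST ( "geom" GIST_GEOMETRY_OPS );
    #
    # Geography fields skip AddGeometryColumn and drop the GIST_GEOMETRY_OPS
    # operator class, since they are created as regular columns.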
def sql_table_creation_suffix(self):
qn = self.connection.ops.quote_name
return ' TEMPLATE %s' % qn(getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis'))
| mit |
PiafPowaz/MultiCode | multicode.py | 1 | 2682 | #DEFINE_TYPE_CODE#py
#sDEFINE_TYPE_CODE#py
# -*- coding: utf-8 -*-
import platform
import os
def main():
if int(platform.python_version_tuple()[0]) < 3:
fullPathFile = raw_input("File's path :")
else:
fullPathFile = input("File's path :")
pathFileNoExt = fullPathFile.split('.')[0]
nameFileNoExt = pathFileNoExt.split('\\')[-1]
pathFile = '/'.join(pathFileNoExt.split('\\')[:-1]) + '/'
    if pathFile == '/':
pathFile = '/'.join(pathFileNoExt.split('/')[:-1]) + '/'
        nameFileNoExt = pathFileNoExt.split('/')[-1]
newF = None
fileClosed = True
totNewFile = 0
fullPathNewFiles = []
if pathFile == '/':
pathFile = ''
pathNewFile = pathFile
nameNewFile = None
fullPathNewFile = None
with open(fullPathFile, 'r') as f:
for line in f:
define = line.split('#')
if fileClosed:
last_word = define[-1]
last_word = last_word.split('\n')
del define[-1]
define += last_word
for word in define:
if word == 'DEFINE_PATH_TYPE_CODE' and len(define) >= define.index(word)+2:
if nameNewFile == None:
nameCode = '.' + str(define[define.index(word)+1])
nameNewFile = nameFileNoExt + nameCode
pathNewFile = str(define[define.index(word)+2])
fullPathNewFile = pathNewFile + nameNewFile
if word == 'DEFINE_NAME_FILE_TYPE_CODE' and len(define) >= define.index(word)+2:
nameCode = '.' + str(define[define.index(word)+1])
nameNewFile = str(define[define.index(word)+2]) + nameCode
fullPathNewFile = pathNewFile + nameNewFile
if word == 'DEFINE_TYPE_CODE' and len(define) > define.index(word):
if fullPathNewFile == None:
if nameNewFile == None:
nameCode = '.' + str(define[define.index(word)+1])
nameNewFile = nameFileNoExt + nameCode
pathNewFile = pathFile
fullPathNewFile = pathNewFile + nameNewFile
newF = open(fullPathNewFile, 'w')
totNewFile += 1
fullPathNewFiles.append(fullPathNewFile)
fileClosed = False
firstLine = True
if word == 'END_DEFINE_TYPE_CODE' and len(define) > define.index(word):
if not fileClosed:
newF.close()
nameCode = None
fileClosed = True
pathNewFile = pathFile
nameNewFile = None
fullPathNewFile = None
if word == 'OS_CMD' and len(define) > define.index(word):
os.system(str(define[define.index(word)+1]))
if newF != None and not fileClosed:
if not firstLine:
newF.write(line)
else:
firstLine = False
print('New files :', totNewFile)
for fullPathNewFile in fullPathNewFiles:
print(fullPathNewFile)
main()
#sEND_DEFINE_TYPE_CODE#py
#END_DEFINE_TYPE_CODE#py
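# Illustrative note (not part of the original script): given an input file whose
# sections are wrapped in DEFINE_TYPE_CODE / END_DEFINE_TYPE_CODE marker lines
# (hash-delimited, exactly as at the top and bottom of this very file), entering
# its path at the prompt writes each section to "<input name>.<type>" next to the
# input file. DEFINE_NAME_FILE_TYPE_CODE and DEFINE_PATH_TYPE_CODE markers placed
# before a section override the output file name and directory, and OS_CMD
# markers pass their argument to os.system().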
| gpl-3.0 |
nigelb/simpleui | simpleui/cli_impl/impl.py | 1 | 2102 | # simpleui implements a number of simple UI patterns with fallback to CLI if the
# selected GUI fails.
#
# Copyright (C) 2012 NigelB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import getpass
class cli_impl:
ui_type = "cli_impl"
def prompt_credentials(self, service):
print "Please enter you your Credentials for %s: "%service
username = raw_input("Username: ")
password = getpass.getpass("Password: ")
return (True, username, password)
def prompt_file_selector(self, title="Enter the filename:", start_dir=".", type=""):
return [raw_input(title)]
def prompt_yes_no(self, message):
input = raw_input("%s [Y/N]: "%message)
if not len(input): return self.prompt_yes_no(message)
try:
return {"Y":True,"N":false}[input[0].upper()]
except Exception as e:
return self.prompt_yes_no(message)
def prompt_list(self, title, prompt, data, multi_select=False):
print(title)
for item in range(len(data)):
print ("\t%i. %s"%(item,data[item]))
toRet = []
if multi_select is False:
return [int(raw_input(prompt))]
else:
print ()
print ('Enter as many as required then enter "f" to finish.')
print ()
try:
while True:
toRet.append(int(raw_input("%s "%prompt)))
except ValueError as v:
pass
print ()
return toRet
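# Minimal usage sketch (illustrative; the service and prompt strings are made up):
#
#     ui = cli_impl()
#     ok, username, password = ui.prompt_credentials("example-service")
#     if ui.prompt_yes_no("Continue?"):
#         paths = ui.prompt_file_selector(title="Log file: ")
#     picks = ui.prompt_list("Colours", "Pick:", ["red", "green", "blue"],
#                            multi_select=True)   # returns a list of indices
#
# Every prompt_* method blocks on stdin, which is what simpleui falls back to
# when the selected GUI implementation is unavailable.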
| gpl-3.0 |
lowitty/sendtrap | lib/pysnmp/entity/rfc3413/ntforg.py | 3 | 16905 | import sys
from pyasn1.compat.octets import null
from pysnmp.entity.rfc3413 import config
from pysnmp.proto.proxy import rfc2576
from pysnmp.proto.api import v2c
from pysnmp.proto import error
from pysnmp import nextid
from pysnmp import debug
getNextHandle = nextid.Integer(0x7fffffff)
class NotificationOriginator:
acmID = 3 # default MIB access control method to use
def __init__(self, snmpContext):
self.__pendingReqs = {}
self.__pendingNotifications = {}
self.snmpContext = snmpContext
def processResponsePdu(
self,
snmpEngine,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
contextEngineId,
contextName,
pduVersion,
PDU,
statusInformation,
sendPduHandle,
cbInfo
):
(cbFun, cbCtx) = cbInfo
# 3.3.6d
if sendPduHandle not in self.__pendingReqs:
raise error.ProtocolError('Missing sendPduHandle %s' % sendPduHandle)
( origTransportDomain,
origTransportAddress,
origMessageProcessingModel,
origSecurityModel,
origSecurityName,
origSecurityLevel,
origContextEngineId,
origContextName,
origPdu,
origTimeout,
origRetryCount,
origRetries,
metaSendPduHandle
) = self.__pendingReqs[sendPduHandle]
del self.__pendingReqs[sendPduHandle]
self.__pendingNotifications[metaSendPduHandle] -= 1
snmpEngine.transportDispatcher.jobFinished(id(self))
if statusInformation:
debug.logger & debug.flagApp and debug.logger('processResponsePdu: metaSendPduHandle %s, sendPduHandle %s statusInformation %s' % (metaSendPduHandle, sendPduHandle, statusInformation))
if origRetries == origRetryCount:
debug.logger & debug.flagApp and debug.logger('processResponsePdu: metaSendPduHandle %s, sendPduHandle %s retry count %d exceeded' % (metaSendPduHandle, sendPduHandle, origRetries))
if not self.__pendingNotifications[metaSendPduHandle]:
del self.__pendingNotifications[metaSendPduHandle]
self._handleResponse(
metaSendPduHandle,
statusInformation['errorIndication'],
0, 0, (),
cbFun,
cbCtx
)
return
# Convert timeout in seconds into timeout in timer ticks
timeoutInTicks = float(origTimeout)/100/snmpEngine.transportDispatcher.getTimerResolution()
# User-side API assumes SMIv2
if messageProcessingModel == 0:
reqPDU = rfc2576.v2ToV1(origPdu)
pduVersion = 0
else:
reqPDU = origPdu
pduVersion = 1
# 3.3.6a
try:
sendPduHandle = snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine,
origTransportDomain,
origTransportAddress,
origMessageProcessingModel,
origSecurityModel,
origSecurityName,
origSecurityLevel,
origContextEngineId,
origContextName,
pduVersion,
reqPDU,
1, # expectResponse
timeoutInTicks,
self.processResponsePdu,
(cbFun, cbCtx)
)
except error.StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('processResponsePdu: metaSendPduHandle %s: sendPdu() failed with %r ' % (metaSendPduHandle, statusInformation))
if not self.__pendingNotifications[metaSendPduHandle]:
del self.__pendingNotifications[metaSendPduHandle]
self._handleResponse(
metaSendPduHandle,
statusInformation['errorIndication'],
0, 0, (),
cbFun,
cbCtx
)
return
self.__pendingNotifications[metaSendPduHandle] += 1
snmpEngine.transportDispatcher.jobStarted(id(self))
debug.logger & debug.flagApp and debug.logger('processResponsePdu: metaSendPduHandle %s, sendPduHandle %s, timeout %d, retry %d of %d' % (metaSendPduHandle, sendPduHandle, origTimeout, origRetries, origRetryCount))
# 3.3.6b
self.__pendingReqs[sendPduHandle] = (
origTransportDomain,
origTransportAddress,
origMessageProcessingModel,
origSecurityModel,
origSecurityName,
origSecurityLevel,
origContextEngineId,
origContextName,
origPdu,
origTimeout,
origRetryCount,
origRetries + 1,
metaSendPduHandle
)
return
# 3.3.6c
if not self.__pendingNotifications[metaSendPduHandle]:
del self.__pendingNotifications[metaSendPduHandle]
# User-side API assumes SMIv2
if messageProcessingModel == 0:
PDU = rfc2576.v1ToV2(PDU, origPdu)
self._handleResponse(metaSendPduHandle, None,
v2c.apiPDU.getErrorStatus(PDU),
v2c.apiPDU.getErrorIndex(PDU,muteErrors=True),
v2c.apiPDU.getVarBinds(PDU),
cbFun, cbCtx)
def _handleResponse(self,
sendRequestHandle,
errorIndication,
errorStatus, errorIndex,
varBinds,
cbFun, cbCtx):
try:
# we need to pass response PDU information to user for INFORMs
cbFun(sendRequestHandle, errorIndication,
errorStatus, errorIndex, varBinds, cbCtx)
except TypeError:
# a backward compatible way of calling user function
cbFun(sendRequestHandle, errorIndication, cbCtx)
def sendNotification(
self,
snmpEngine,
notificationTarget,
notificationName,
additionalVarBinds=(),
cbFun=None,
cbCtx=None,
contextName=null,
instanceIndex=None
):
debug.logger & debug.flagApp and debug.logger('sendNotification: notificationTarget %s, notificationName %s, additionalVarBinds %s, contextName "%s", instanceIndex %s' % (notificationTarget, notificationName, additionalVarBinds, contextName, instanceIndex))
if contextName:
__SnmpAdminString, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('SNMP-FRAMEWORK-MIB', 'SnmpAdminString')
contextName = __SnmpAdminString(contextName)
# 3.3
( notifyTag,
notifyType ) = config.getNotificationInfo(
snmpEngine, notificationTarget
)
metaSendPduHandle = getNextHandle()
debug.logger & debug.flagApp and debug.logger('sendNotification: metaSendPduHandle %s, notifyTag %s, notifyType %s' % (metaSendPduHandle, notifyTag, notifyType))
contextMibInstrumCtl = self.snmpContext.getMibInstrum(contextName)
additionalVarBinds = [ (v2c.ObjectIdentifier(x),y) for x,y in additionalVarBinds ]
for targetAddrName in config.getTargetNames(snmpEngine, notifyTag):
( transportDomain,
transportAddress,
timeout,
retryCount,
params ) = config.getTargetAddr(snmpEngine, targetAddrName)
( messageProcessingModel,
securityModel,
securityName,
securityLevel ) = config.getTargetParams(snmpEngine, params)
debug.logger & debug.flagApp and debug.logger('sendNotification: metaSendPduHandle %s, notifyTag %s yields: transportDomain %s, transportAddress %r, securityModel %s, securityName %s, securityLevel %s' % (metaSendPduHandle, notifyTag, transportDomain, transportAddress, securityModel, securityName, securityLevel))
# 3.3.1 XXX
# XXX filtering's yet to be implemented
# filterProfileName = config.getNotifyFilterProfile(params)
# ( filterSubtree,
# filterMask,
# filterType ) = config.getNotifyFilter(filterProfileName)
varBinds = []
# 3.3.2 & 3.3.3
sysUpTime, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'sysUpTime')
for varName, varVal in additionalVarBinds:
if varName == sysUpTime.name:
varBinds.append((varName, varVal))
break
if not varBinds:
varBinds.append((sysUpTime.name,
sysUpTime.syntax.clone())) # for actual value
snmpTrapOid, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpTrapOID')
if len(notificationName) == 2: # ('MIB', 'symbol')
notificationTypeObject, = contextMibInstrumCtl.mibBuilder.importSymbols(*notificationName)
varBinds.append((snmpTrapOid.name, v2c.ObjectIdentifier(notificationTypeObject.name)))
debug.logger & debug.flagApp and debug.logger('sendNotification: notification type object is %s' % notificationTypeObject)
for notificationObject in notificationTypeObject.getObjects():
mibNode, = contextMibInstrumCtl.mibBuilder.importSymbols(*notificationObject)
if instanceIndex:
mibNode = mibNode.getNode(mibNode.name + instanceIndex)
else:
mibNode = mibNode.getNextNode(mibNode.name)
varBinds.append((mibNode.name, mibNode.syntax))
debug.logger & debug.flagApp and debug.logger('sendNotification: processed notification object %s, instance index %s, var-bind %s' % (notificationObject, instanceIndex is None and "<first>" or instanceIndex, mibNode))
elif notificationName: # numeric OID
varBinds.append(
(snmpTrapOid.name,
snmpTrapOid.syntax.clone(notificationName))
)
else:
varBinds.append((snmpTrapOid.name, snmpTrapOid.syntax))
for varName, varVal in additionalVarBinds:
if varName in (sysUpTime.name, snmpTrapOid.name):
continue
try:
snmpEngine.accessControlModel[self.acmID].isAccessAllowed(
snmpEngine, securityModel, securityName,
securityLevel, 'notify', contextName, varName
)
except error.StatusInformation:
                    debug.logger & debug.flagApp and debug.logger('sendNotification: OID %s not allowed for %s, dropping notification' % (varName, securityName))
return
else:
varBinds.append((varName, varVal))
# 3.3.4
if notifyType == 1:
pdu = v2c.SNMPv2TrapPDU()
elif notifyType == 2:
pdu = v2c.InformRequestPDU()
else:
raise RuntimeError()
v2c.apiPDU.setDefaults(pdu)
v2c.apiPDU.setVarBinds(pdu, varBinds)
# User-side API assumes SMIv2
if messageProcessingModel == 0:
reqPDU = rfc2576.v2ToV1(pdu)
pduVersion = 0
else:
reqPDU = pdu
pduVersion = 1
# 3.3.5
if notifyType == 1:
try:
snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine,
transportDomain,
transportAddress,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
self.snmpContext.contextEngineId,
contextName,
pduVersion,
reqPDU,
None
)
except error.StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('sendReq: metaSendPduHandle %s: sendPdu() failed with %r' % (metaSendPduHandle, statusInformation))
if metaSendPduHandle not in self.__pendingNotifications or \
not self.__pendingNotifications[metaSendPduHandle]:
if metaSendPduHandle in self.__pendingNotifications:
del self.__pendingNotifications[metaSendPduHandle]
self._handleResponse(
metaSendPduHandle,
statusInformation['errorIndication'],
0, 0, (),
cbFun,
cbCtx
)
return metaSendPduHandle
else:
# Convert timeout in seconds into timeout in timer ticks
timeoutInTicks = float(timeout)/100/snmpEngine.transportDispatcher.getTimerResolution()
# 3.3.6a
try:
sendPduHandle = snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine,
transportDomain,
transportAddress,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
self.snmpContext.contextEngineId,
contextName,
pduVersion,
reqPDU,
1, # expectResponse
timeoutInTicks,
self.processResponsePdu,
(cbFun, cbCtx)
)
except error.StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('sendReq: metaSendPduHandle %s: sendPdu() failed with %r' % (metaSendPduHandle, statusInformation))
if metaSendPduHandle not in self.__pendingNotifications or \
not self.__pendingNotifications[metaSendPduHandle]:
if metaSendPduHandle in self.__pendingNotifications:
del self.__pendingNotifications[metaSendPduHandle]
self._handleResponse(
metaSendPduHandle,
statusInformation['errorIndication'],
0, 0, (),
cbFun,
cbCtx
)
return metaSendPduHandle
debug.logger & debug.flagApp and debug.logger('sendNotification: metaSendPduHandle %s, sendPduHandle %s, timeout %d' % (metaSendPduHandle, sendPduHandle, timeout))
# 3.3.6b
self.__pendingReqs[sendPduHandle] = (
transportDomain,
transportAddress,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
self.snmpContext.contextEngineId,
contextName,
pdu,
timeout,
retryCount,
1,
metaSendPduHandle
)
if metaSendPduHandle not in self.__pendingNotifications:
self.__pendingNotifications[metaSendPduHandle] = 0
self.__pendingNotifications[metaSendPduHandle] += 1
snmpEngine.transportDispatcher.jobStarted(id(self))
debug.logger & debug.flagApp and debug.logger('sendNotification: metaSendPduHandle %s, notification(s) sent' % metaSendPduHandle)
return metaSendPduHandle
# XXX
# move/group/implement config setting/retrieval at a stand-alone module
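# Usage sketch (assumes an SnmpEngine, a SnmpContext and a notification target
# named 'my-notification' were configured beforehand with pysnmp's config
# helpers; the target name and callback below are placeholders):
#
#     ntfOrg = NotificationOriginator(snmpContext)
#     handle = ntfOrg.sendNotification(
#         snmpEngine,
#         'my-notification',            # notificationTarget configured elsewhere
#         ('SNMPv2-MIB', 'coldStart'),  # notificationName as ('MIB', 'symbol')
#         additionalVarBinds=(),
#         cbFun=myCallback, cbCtx=None
#     )
#
# TRAP targets (notifyType == 1) are fire-and-forget; INFORM targets
# (notifyType == 2) are retried per the target's timeout/retry settings and
# report back through processResponsePdu()/_handleResponse().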
| mit |
40223148/2015cda_g5 | static/Brython3.1.1-20150328-091302/Lib/urllib/parse.py | 735 | 35170 | """Parse (absolute and relative) URLs.
The urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to defacto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provide a good indicator of parsing behavior.
"""
import re
import sys
import collections
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "urlencode", "parse_qs",
"parse_qsl", "quote", "quote_plus", "quote_from_bytes",
"unquote", "unquote_plus", "unquote_to_bytes"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp',
'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp', 'tel']
# These are not actually used anymore, but should stay for backwards
# compatibility. (They are undocumented, but have a public-looking name.)
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
# XXX: Consider replacing with functools.lru_cache
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
"""Clear the parse cache and the quoters cache."""
_parse_cache.clear()
_safe_quoters.clear()
# Helpers for bytes handling
# For 3.2, we deliberately require applications that
# handle improperly quoted URLs to do their own
# decoding and encoding. If valid use cases are
# presented, we may relax this by using latin-1
# decoding internally for 3.3
_implicit_encoding = 'ascii'
_implicit_errors = 'strict'
def _noop(obj):
return obj
def _encode_result(obj, encoding=_implicit_encoding,
errors=_implicit_errors):
return obj.encode(encoding, errors)
def _decode_args(args, encoding=_implicit_encoding,
errors=_implicit_errors):
return tuple(x.decode(encoding, errors) if x else '' for x in args)
def _coerce_args(*args):
# Invokes decode if necessary to create str args
# and returns the coerced inputs along with
# an appropriate result coercion function
# - noop for str inputs
# - encoding function otherwise
str_input = isinstance(args[0], str)
for arg in args[1:]:
# We special-case the empty string to support the
# "scheme=''" default argument to some functions
if arg and isinstance(arg, str) != str_input:
raise TypeError("Cannot mix str and non-str arguments")
if str_input:
return args + (_noop,)
return _decode_args(args) + (_encode_result,)
# Result objects are more helpful than simple tuples
class _ResultMixinStr(object):
"""Standard approach to encoding parsed results from str to bytes"""
__slots__ = ()
def encode(self, encoding='ascii', errors='strict'):
return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))
class _ResultMixinBytes(object):
"""Standard approach to decoding parsed results from bytes to str"""
__slots__ = ()
def decode(self, encoding='ascii', errors='strict'):
return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))
class _NetlocResultMixinBase(object):
"""Shared methods for the parsed result objects containing a netloc element"""
__slots__ = ()
@property
def username(self):
return self._userinfo[0]
@property
def password(self):
return self._userinfo[1]
@property
def hostname(self):
hostname = self._hostinfo[0]
if not hostname:
hostname = None
elif hostname is not None:
hostname = hostname.lower()
return hostname
@property
def port(self):
port = self._hostinfo[1]
if port is not None:
port = int(port, 10)
# Return None on an illegal port
if not ( 0 <= port <= 65535):
return None
return port
class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition('@')
if have_info:
username, have_password, password = userinfo.partition(':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition('@')
_, have_open_br, bracketed = hostinfo.partition('[')
if have_open_br:
hostname, _, port = bracketed.partition(']')
_, have_port, port = port.partition(':')
else:
hostname, have_port, port = hostinfo.partition(':')
if not have_port:
port = None
return hostname, port
class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition(b'@')
if have_info:
username, have_password, password = userinfo.partition(b':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition(b'@')
_, have_open_br, bracketed = hostinfo.partition(b'[')
if have_open_br:
hostname, _, port = bracketed.partition(b']')
_, have_port, port = port.partition(b':')
else:
hostname, have_port, port = hostinfo.partition(b':')
if not have_port:
port = None
return hostname, port
from collections import namedtuple
_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment')
# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr
# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
__slots__ = ()
def geturl(self):
if self.fragment:
return self.url + '#' + self.fragment
else:
return self.url
class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
__slots__ = ()
def geturl(self):
return urlunparse(self)
# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
__slots__ = ()
def geturl(self):
if self.fragment:
return self.url + b'#' + self.fragment
else:
return self.url
class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
__slots__ = ()
def geturl(self):
return urlunparse(self)
# Set up the encode/decode result pairs
def _fix_result_transcoding():
_result_pairs = (
(DefragResult, DefragResultBytes),
(SplitResult, SplitResultBytes),
(ParseResult, ParseResultBytes),
)
for _decoded, _encoded in _result_pairs:
_decoded._encoded_counterpart = _encoded
_encoded._decoded_counterpart = _decoded
_fix_result_transcoding()
del _fix_result_transcoding
def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
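# Illustrative example of the 6-tuple described above:
#
#     >>> urlparse('http://host:80/path;params?q=1#frag')
#     ParseResult(scheme='http', netloc='host:80', path='/path', params='params', query='q=1', fragment='frag')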
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return _coerce_result(cached)
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
scheme = url[:i].lower()
url = url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
for c in url[:i]:
if c not in scheme_chars:
break
else:
# make sure "url" is not actually a port number (in which case
# "scheme" is really part of the path)
rest = url[i+1:]
if not rest or any(c not in '0123456789' for c in rest):
# not a port number
scheme, url = url[:i].lower(), rest
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
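# Illustrative example -- unlike urlparse(), the params stay attached to the path:
#
#     >>> urlsplit('http://host:80/path;params?q=1#frag')
#     SplitResult(scheme='http', netloc='host:80', path='/path;params', query='q=1', fragment='frag')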
def urlunparse(components):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
scheme, netloc, url, params, query, fragment, _coerce_result = (
_coerce_args(*components))
if params:
url = "%s;%s" % (url, params)
return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))
def urlunsplit(components):
"""Combine the elements of a tuple as returned by urlsplit() into a
complete URL as a string. The data argument can be any five-item iterable.
This may result in a slightly different, but equivalent URL, if the URL that
was parsed originally had unnecessary delimiters (for example, a ? with an
empty query; the RFC states that these are equivalent)."""
scheme, netloc, url, query, fragment, _coerce_result = (
_coerce_args(*components))
if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
if url and url[:1] != '/': url = '/' + url
url = '//' + (netloc or '') + url
if scheme:
url = scheme + ':' + url
if query:
url = url + '?' + query
if fragment:
url = url + '#' + fragment
return _coerce_result(url)
def urljoin(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter."""
if not base:
return url
if not url:
return base
base, url, _coerce_result = _coerce_args(base, url)
bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
urlparse(base, '', allow_fragments)
scheme, netloc, path, params, query, fragment = \
urlparse(url, bscheme, allow_fragments)
if scheme != bscheme or scheme not in uses_relative:
return _coerce_result(url)
if scheme in uses_netloc:
if netloc:
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
netloc = bnetloc
if path[:1] == '/':
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
if not path and not params:
path = bpath
params = bparams
if not query:
query = bquery
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
segments = bpath.split('/')[:-1] + path.split('/')
# XXX The stuff below is bogus in various ways...
if segments[-1] == '.':
segments[-1] = ''
while '.' in segments:
segments.remove('.')
while 1:
i = 1
n = len(segments) - 1
while i < n:
if (segments[i] == '..'
and segments[i-1] not in ('', '..')):
del segments[i-1:i+1]
break
i = i+1
else:
break
if segments == ['', '..']:
segments[-1] = ''
elif len(segments) >= 2 and segments[-1] == '..':
segments[-2:] = ['']
return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments),
params, query, fragment)))
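# Illustrative examples of relative resolution:
#
#     >>> urljoin('http://www.cwi.nl/%7Eguido/Python.html', 'FAQ.html')
#     'http://www.cwi.nl/%7Eguido/FAQ.html'
#     >>> urljoin('http://www.cwi.nl/%7Eguido/Python.html', '//www.python.org/%7Eguido')
#     'http://www.python.org/%7Eguido'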
def urldefrag(url):
"""Removes any existing fragment from URL.
Returns a tuple of the defragmented URL and the fragment. If
the URL contained no fragments, the second element is the
empty string.
"""
url, _coerce_result = _coerce_args(url)
if '#' in url:
s, n, p, a, q, frag = urlparse(url)
defrag = urlunparse((s, n, p, a, q, ''))
else:
frag = ''
defrag = url
return _coerce_result(DefragResult(defrag, frag))
_hexdig = '0123456789ABCDEFabcdef'
_hextobyte = {(a + b).encode(): bytes([int(a + b, 16)])
for a in _hexdig for b in _hexdig}
def unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(_hextobyte[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return b''.join(res)
_asciire = re.compile('([\x00-\x7f]+)')
def unquote(string, encoding='utf-8', errors='replace'):
"""Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences into Unicode characters, as accepted by the bytes.decode()
method.
By default, percent-encoded sequences are decoded with UTF-8, and invalid
sequences are replaced by a placeholder character.
unquote('abc%20def') -> 'abc def'.
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _asciire.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
"""
parsed_result = {}
pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
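# Illustrative example (key order in the result dict is not guaranteed):
#
#     >>> parse_qs('key=value1&key=value2&empty=')
#     {'key': ['value1', 'value2']}
#     >>> parse_qs('key=value1&key=value2&empty=', keep_blank_values=True)
#     {'key': ['value1', 'value2'], 'empty': ['']}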
def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
Returns a list, as G-d intended.
"""
qs, _coerce_result = _coerce_args(qs)
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
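# Illustrative example -- '+' decodes to a space and pairs keep query-string order:
#
#     >>> parse_qsl('a=1&b=two+words&a=2')
#     [('a', '1'), ('b', 'two words'), ('a', '2')]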
def unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return unquote(string, encoding, errors)
_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'abcdefghijklmnopqrstuvwxyz'
b'0123456789'
b'_.-')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
_safe_quoters = {}
class Quoter(collections.defaultdict):
"""A mapping from bytes (in range(0,256)) to strings.
String values are percent-encoded byte values, unless the key < 128, and
in the "safe" set (either the specified safe set, or default set).
"""
# Keeps a cache internally, using defaultdict, for efficiency (lookups
# of cached keys don't call Python code at all).
def __init__(self, safe):
"""safe: bytes object."""
self.safe = _ALWAYS_SAFE.union(safe)
def __repr__(self):
# Without this, will just display as a defaultdict
return "<Quoter %r>" % dict(self)
def __missing__(self, b):
# Handle a cache miss. Store quoted string in cache and return.
res = chr(b) if b in self.safe else '%{:02X}'.format(b)
self[b] = res
return res
def quote(string, safe='/', encoding=None, errors=None):
"""quote('abc def') -> 'abc%20def'
Each part of a URL, e.g. the path info, the query, etc., has a
different set of reserved characters that must be quoted.
RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
the following reserved characters.
reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
"$" | ","
Each of these characters is reserved in some component of a URL,
but not necessarily in all of them.
By default, the quote function is intended for quoting the path
section of a URL. Thus, it will not encode '/'. This character
is reserved, but in typical usage the quote function is being
called on a path where the existing slash characters are used as
reserved characters.
string and safe may be either str or bytes objects. encoding must
not be specified if string is a str.
The optional encoding and errors parameters specify how to deal with
non-ASCII characters, as accepted by the str.encode method.
By default, encoding='utf-8' (characters are encoded with UTF-8), and
errors='strict' (unsupported characters raise a UnicodeEncodeError).
"""
if isinstance(string, str):
if not string:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'strict'
string = string.encode(encoding, errors)
else:
if encoding is not None:
raise TypeError("quote() doesn't support 'encoding' for bytes")
if errors is not None:
raise TypeError("quote() doesn't support 'errors' for bytes")
return quote_from_bytes(string, safe)
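# Illustrative examples -- '/' survives by default, everything else outside the
# always-safe set is percent-encoded:
#
#     >>> quote('abc def/ghi')
#     'abc%20def/ghi'
#     >>> quote('abc def/ghi', safe='')
#     'abc%20def%2Fghi'
#
# Non-ASCII text is encoded (UTF-8 by default) before quoting, so '\u00f1'
# becomes '%C3%B1'.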
def quote_plus(string, safe='', encoding=None, errors=None):
"""Like quote(), but also replace ' ' with '+', as required for quoting
HTML form values. Plus signs in the original string are escaped unless
they are included in safe. It also does not have safe default to '/'.
"""
# Check if ' ' in string, where string may either be a str or bytes. If
# there are no spaces, the regular quote will produce the right answer.
if ((isinstance(string, str) and ' ' not in string) or
(isinstance(string, bytes) and b' ' not in string)):
return quote(string, safe, encoding, errors)
if isinstance(safe, str):
space = ' '
else:
space = b' '
string = quote(string, safe + space, encoding, errors)
return string.replace(' ', '+')
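# Illustrative example -- spaces become '+', and literal plus signs in the input
# are escaped so the two can be told apart:
#
#     >>> quote_plus('abc def+ghi')
#     'abc+def%2Bghi'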
def quote_from_bytes(bs, safe='/'):
"""Like quote(), but accepts a bytes object rather than a str, and does
not perform string-to-bytes encoding. It always returns an ASCII string.
quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
"""
if not isinstance(bs, (bytes, bytearray)):
raise TypeError("quote_from_bytes() expected bytes")
if not bs:
return ''
if isinstance(safe, str):
# Normalize 'safe' by converting to bytes and removing non-ASCII chars
safe = safe.encode('ascii', 'ignore')
else:
safe = bytes([c for c in safe if c < 128])
if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
return bs.decode()
try:
quoter = _safe_quoters[safe]
except KeyError:
_safe_quoters[safe] = quoter = Quoter(safe).__getitem__
return ''.join([quoter(char) for char in bs])
def urlencode(query, doseq=False, safe='', encoding=None, errors=None):
"""Encode a sequence of two-element tuples or dictionary into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The query arg may be either a string or a bytes type. When query arg is a
string, the safe, encoding and error parameters are sent the quote_plus for
encoding.
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError("not a valid non-string sequence "
"or mapping object").with_traceback(tb)
l = []
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_plus(k, safe)
else:
k = quote_plus(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_plus(v, safe)
else:
v = quote_plus(str(v), safe, encoding, errors)
l.append(k + '=' + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_plus(k, safe)
else:
k = quote_plus(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_plus(v, safe)
l.append(k + '=' + v)
elif isinstance(v, str):
v = quote_plus(v, safe, encoding, errors)
l.append(k + '=' + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v)
except TypeError:
# not a sequence
v = quote_plus(str(v), safe, encoding, errors)
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_plus(elt, safe)
else:
elt = quote_plus(str(elt), safe, encoding, errors)
l.append(k + '=' + elt)
return '&'.join(l)
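# Illustrative examples -- a sequence of pairs preserves ordering, and doseq=True
# expands sequence values into repeated parameters:
#
#     >>> urlencode([('q', 'spam eggs'), ('page', 2)])
#     'q=spam+eggs&page=2'
#     >>> urlencode({'tag': ['a', 'b']}, doseq=True)
#     'tag=a&tag=b'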
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# urllib.parse.unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def')
def to_bytes(url):
"""to_bytes(u"URL") --> 'URL'."""
# Most URL schemes require ASCII. If that changes, the conversion
# can be relaxed.
# XXX get rid of to_bytes()
if isinstance(url, str):
try:
url = url.encode("ASCII").decode()
except UnicodeError:
raise UnicodeError("URL " + repr(url) +
" contains non-ASCII characters")
return url
def unwrap(url):
"""unwrap('<URL:type://host/path>') --> 'type://host/path'."""
url = str(url).strip()
if url[:1] == '<' and url[-1:] == '>':
url = url[1:-1].strip()
if url[:4] == 'URL:': url = url[4:].strip()
return url
_typeprog = None
def splittype(url):
"""splittype('type:opaquestring') --> 'type', 'opaquestring'."""
global _typeprog
if _typeprog is None:
import re
_typeprog = re.compile('^([^/:]+):')
match = _typeprog.match(url)
if match:
scheme = match.group(1)
return scheme.lower(), url[len(scheme) + 1:]
return None, url
_hostprog = None
def splithost(url):
"""splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
global _hostprog
if _hostprog is None:
import re
_hostprog = re.compile('^//([^/?]*)(.*)$')
match = _hostprog.match(url)
if match:
host_port = match.group(1)
path = match.group(2)
if path and not path.startswith('/'):
path = '/' + path
return host_port, path
return None, url
_userprog = None
def splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
global _userprog
if _userprog is None:
import re
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match: return match.group(1, 2)
return None, host
_passwdprog = None
def splitpasswd(user):
"""splitpasswd('user:passwd') -> 'user', 'passwd'."""
global _passwdprog
if _passwdprog is None:
import re
_passwdprog = re.compile('^([^:]*):(.*)$',re.S)
match = _passwdprog.match(user)
if match: return match.group(1, 2)
return user, None
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
"""splitport('host:port') --> 'host', 'port'."""
global _portprog
if _portprog is None:
import re
_portprog = re.compile('^(.*):([0-9]+)$')
match = _portprog.match(host)
if match: return match.group(1, 2)
return host, None
_nportprog = None
def splitnport(host, defport=-1):
"""Split host and port, returning numeric port.
Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number is found after ':'.
Return None if ':' but not a valid number."""
global _nportprog
if _nportprog is None:
import re
_nportprog = re.compile('^(.*):(.*)$')
match = _nportprog.match(host)
if match:
host, port = match.group(1, 2)
try:
if not port: raise ValueError("no digits")
nport = int(port)
except ValueError:
nport = None
return host, nport
return host, defport
_queryprog = None
def splitquery(url):
"""splitquery('/path?query') --> '/path', 'query'."""
global _queryprog
if _queryprog is None:
import re
_queryprog = re.compile('^(.*)\?([^?]*)$')
match = _queryprog.match(url)
if match: return match.group(1, 2)
return url, None
_tagprog = None
def splittag(url):
"""splittag('/path#tag') --> '/path', 'tag'."""
global _tagprog
if _tagprog is None:
import re
_tagprog = re.compile('^(.*)#([^#]*)$')
match = _tagprog.match(url)
if match: return match.group(1, 2)
return url, None
def splitattr(url):
"""splitattr('/path;attr1=value1;attr2=value2;...') ->
'/path', ['attr1=value1', 'attr2=value2', ...]."""
words = url.split(';')
return words[0], words[1:]
_valueprog = None
def splitvalue(attr):
"""splitvalue('attr=value') --> 'attr', 'value'."""
global _valueprog
if _valueprog is None:
import re
_valueprog = re.compile('^([^=]*)=(.*)$')
match = _valueprog.match(attr)
if match: return match.group(1, 2)
return attr, None
| gpl-3.0 |
kosgroup/odoo | addons/stock/models/stock_location.py | 4 | 13345 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from dateutil import relativedelta
from odoo import api, fields, models, _
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
class Location(models.Model):
_name = "stock.location"
_description = "Inventory Locations"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
_rec_name = 'complete_name'
@api.model
def default_get(self, fields):
res = super(Location, self).default_get(fields)
if 'barcode' in fields and 'barcode' not in res and res.get('complete_name'):
res['barcode'] = res['complete_name']
return res
name = fields.Char('Location Name', required=True, translate=True)
# TDE CLEAME: unnecessary field, use name_get instead
complete_name = fields.Char("Full Location Name", compute='_compute_complete_name', store=True)
active = fields.Boolean('Active', default=True, help="By unchecking the active field, you may hide a location without deleting it.")
usage = fields.Selection([
('supplier', 'Vendor Location'),
('view', 'View'),
('internal', 'Internal Location'),
('customer', 'Customer Location'),
('inventory', 'Inventory Loss'),
('procurement', 'Procurement'),
('production', 'Production'),
('transit', 'Transit Location')], string='Location Type',
default='internal', index=True, required=True,
help="* Vendor Location: Virtual location representing the source location for products coming from your vendors"
"\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products"
"\n* Internal Location: Physical locations inside your own warehouses,"
"\n* Customer Location: Virtual location representing the destination location for products sent to your customers"
"\n* Inventory Loss: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)"
"\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (vendor or production) is not known yet. This location should be empty when the procurement scheduler has finished running."
"\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products"
"\n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations")
location_id = fields.Many2one(
'stock.location', 'Parent Location', index=True, ondelete='cascade',
help="The parent location that includes this location. Example : The 'Dispatch Zone' is the 'Gate 1' parent location.")
child_ids = fields.One2many('stock.location', 'location_id', 'Contains')
partner_id = fields.Many2one('res.partner', 'Owner', help="Owner of the location if not internal")
comment = fields.Text('Additional Information')
posx = fields.Integer('Corridor (X)', default=0, help="Optional localization details, for information purpose only")
posy = fields.Integer('Shelves (Y)', default=0, help="Optional localization details, for information purpose only")
posz = fields.Integer('Height (Z)', default=0, help="Optional localization details, for information purpose only")
parent_left = fields.Integer('Left Parent', index=True)
parent_right = fields.Integer('Right Parent', index=True)
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('stock.location'), index=True,
help='Let this field empty if this location is shared between companies')
scrap_location = fields.Boolean('Is a Scrap Location?', default=False, help='Check this box to allow using this location to put scrapped/damaged goods.')
return_location = fields.Boolean('Is a Return Location?', help='Check this box to allow using this location as a return location.')
removal_strategy_id = fields.Many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here.")
putaway_strategy_id = fields.Many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here.")
barcode = fields.Char('Barcode', copy=False, oldname='loc_barcode')
_sql_constraints = [('barcode_company_uniq', 'unique (barcode,company_id)', 'The barcode for a location must be unique per company !')]
@api.one
@api.depends('name', 'location_id')
def _compute_complete_name(self):
""" Forms complete name of location from parent location to child location. """
name = self.name
current = self
while current.location_id and current.usage != 'view':
current = current.location_id
name = '%s/%s' % (current.name, name)
self.complete_name = name
@api.multi
def name_get(self):
return [(location.id, location.complete_name) for location in self]
def get_putaway_strategy(self, product):
        ''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns an empty location recordset.'''
current_location = self
putaway_location = self.env['stock.location']
while current_location and not putaway_location:
if current_location.putaway_strategy_id:
putaway_location = current_location.putaway_strategy_id.putaway_apply(product)
current_location = current_location.location_id
return putaway_location
@api.multi
@api.returns('stock.warehouse', lambda value: value.id)
def get_warehouse(self):
""" Returns warehouse id of warehouse that contains location """
return self.env['stock.warehouse'].search([
('view_location_id.parent_left', '<=', self.parent_left),
('view_location_id.parent_right', '>=', self.parent_left)], limit=1)
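# Illustrative sketch (hypothetical records, standard ORM calls) of the helpers above:
#
#     shelf = env['stock.location'].create({
#         'name': 'Shelf 1',
#         'usage': 'internal',
#         'location_id': stock_location.id,   # e.g. the WH/Stock internal location
#     })
#     shelf.complete_name                 # -> 'WH/Stock/Shelf 1'
#     shelf.get_putaway_strategy(product) # walks up location_id until a putaway
#                                         # strategy is found, else empty recordset
#     shelf.get_warehouse()               # warehouse whose view location encloses it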
class Route(models.Model):
_name = 'stock.location.route'
_description = "Inventory Routes"
_order = 'sequence'
name = fields.Char('Route Name', required=True, translate=True)
active = fields.Boolean('Active', default=True, help="If the active field is set to False, it will allow you to hide the route without removing it.")
sequence = fields.Integer('Sequence', default=0)
pull_ids = fields.One2many('procurement.rule', 'route_id', 'Procurement Rules', copy=True)
push_ids = fields.One2many('stock.location.path', 'route_id', 'Push Rules', copy=True)
product_selectable = fields.Boolean('Applicable on Product', default=True, help="When checked, the route will be selectable in the Inventory tab of the Product form. It will take priority over the Warehouse route. ")
product_categ_selectable = fields.Boolean('Applicable on Product Category', help="When checked, the route will be selectable on the Product Category. It will take priority over the Warehouse route. ")
warehouse_selectable = fields.Boolean('Applicable on Warehouse', help="When a warehouse is selected for this route, this route should be seen as the default route when products pass through this warehouse. This behaviour can be overridden by the routes on the Product/Product Categories or by the Preferred Routes on the Procurement")
supplied_wh_id = fields.Many2one('stock.warehouse', 'Supplied Warehouse')
supplier_wh_id = fields.Many2one('stock.warehouse', 'Supplying Warehouse')
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('stock.location.route'), index=True,
help='Leave this field empty if this route is shared between all companies')
product_ids = fields.Many2many('product.template', 'stock_route_product', 'route_id', 'product_id', 'Products')
categ_ids = fields.Many2many('product.category', 'stock_location_route_categ', 'route_id', 'categ_id', 'Product Categories')
warehouse_ids = fields.Many2many('stock.warehouse', 'stock_route_warehouse', 'route_id', 'warehouse_id', 'Warehouses')
@api.multi
def write(self, values):
        '''When a route is (de)activated, also (de)activate its pull and push rules.'''
res = super(Route, self).write(values)
if 'active' in values:
self.mapped('push_ids').filtered(lambda path: path.active != values['active']).write({'active': values['active']})
self.mapped('pull_ids').filtered(lambda rule: rule.active != values['active']).write({'active': values['active']})
return res
@api.multi
def view_product_ids(self):
return {
'name': _('Products'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'product.template',
'type': 'ir.actions.act_window',
'domain': [('route_ids', 'in', self.ids)],
}
@api.multi
def view_categ_ids(self):
return {
'name': _('Product Categories'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'product.category',
'type': 'ir.actions.act_window',
'domain': [('route_ids', 'in', self.ids)],
}
class PushedFlow(models.Model):
_name = "stock.location.path"
_description = "Pushed Flow"
_order = "name"
name = fields.Char('Operation Name', required=True)
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('procurement.order'), index=True)
route_id = fields.Many2one('stock.location.route', 'Route')
location_from_id = fields.Many2one(
'stock.location', 'Source Location', index=True, ondelete='cascade', required=True,
help="This rule can be applied when a move is confirmed that has this location as destination location")
location_dest_id = fields.Many2one(
'stock.location', 'Destination Location', index=True, ondelete='cascade', required=True,
help="The new location where the goods need to go")
delay = fields.Integer('Delay (days)', default=0, help="Number of days needed to transfer the goods")
picking_type_id = fields.Many2one(
'stock.picking.type', 'Picking Type', required=True,
help="This is the picking type that will be put on the stock moves")
auto = fields.Selection([
('manual', 'Manual Operation'),
('transparent', 'Automatic No Step Added')], string='Automatic Move',
default='manual', index=True, required=True,
help="The 'Manual Operation' value will create a stock move after the current one."
"With 'Automatic No Step Added', the location is replaced in the original move.")
propagate = fields.Boolean('Propagate cancel and split', default=True, help='If checked, when the previous move is cancelled or split, the move generated by this move will too')
active = fields.Boolean('Active', default=True)
warehouse_id = fields.Many2one('stock.warehouse', 'Warehouse')
route_sequence = fields.Integer('Route Sequence', related='route_id.sequence', store=True)
sequence = fields.Integer('Sequence')
def _apply(self, move):
new_date = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=self.delay)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if self.auto == 'transparent':
move.write({
'date': new_date,
'date_expected': new_date,
'location_dest_id': self.location_dest_id.id})
# avoid looping if a push rule is not well configured; otherwise call again push_apply to see if a next step is defined
if self.location_dest_id != move.location_dest_id:
# TDE FIXME: should probably be done in the move model IMO
move._push_apply()
else:
new_move = move.copy({
'origin': move.origin or move.picking_id.name or "/",
'location_id': move.location_dest_id.id,
'location_dest_id': self.location_dest_id.id,
'date': new_date,
'date_expected': new_date,
'company_id': self.company_id.id,
'picking_id': False,
'picking_type_id': self.picking_type_id.id,
'propagate': self.propagate,
'push_rule_id': self.id,
'warehouse_id': self.warehouse_id.id,
'procurement_id': False,
})
move.write({'move_dest_id': new_move.id})
new_move.action_confirm()
| gpl-3.0 |
sfluo/Mr.Bot | crypto/pycrypto-2.6/build/lib.macosx-10.7-intel-2.7/Crypto/Hash/hashalgo.py | 124 | 3984 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from binascii import hexlify
class HashAlgo:
"""A generic class for an abstract cryptographic hash algorithm.
:undocumented: block_size
"""
#: The size of the resulting hash in bytes.
digest_size = None
#: The internal block size of the hash algorithm in bytes.
block_size = None
def __init__(self, hashFactory, data=None):
"""Initialize the hash object.
:Parameters:
hashFactory : callable
An object that will generate the actual hash implementation.
*hashFactory* must have a *new()* method, or must be directly
callable.
data : byte string
The very first chunk of the message to hash.
It is equivalent to an early call to `update()`.
"""
if hasattr(hashFactory, 'new'):
self._hash = hashFactory.new()
else:
self._hash = hashFactory()
if data:
self.update(data)
def update(self, data):
"""Continue hashing of a message by consuming the next chunk of data.
Repeated calls are equivalent to a single call with the concatenation
of all the arguments. In other words:
>>> m.update(a); m.update(b)
is equivalent to:
>>> m.update(a+b)
:Parameters:
data : byte string
The next chunk of the message being hashed.
"""
return self._hash.update(data)
def digest(self):
"""Return the **binary** (non-printable) digest of the message that has been hashed so far.
This method does not change the state of the hash object.
You can continue updating the object after calling this function.
:Return: A byte string of `digest_size` bytes. It may contain non-ASCII
characters, including null bytes.
"""
return self._hash.digest()
def hexdigest(self):
"""Return the **printable** digest of the message that has been hashed so far.
This method does not change the state of the hash object.
:Return: A string of 2* `digest_size` characters. It contains only
hexadecimal ASCII digits.
"""
return self._hash.hexdigest()
def copy(self):
"""Return a copy ("clone") of the hash object.
The copy will have the same internal state as the original hash
object.
This can be used to efficiently compute the digests of strings that
share a common initial substring.
:Return: A hash object of the same type
"""
return self._hash.copy()
def new(self, data=None):
"""Return a fresh instance of the hash object.
Unlike the `copy` method, the internal state of the object is empty.
:Parameters:
data : byte string
The next chunk of the message being hashed.
:Return: A hash object of the same type
"""
pass
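# Illustrative usage sketch added for clarity (not part of the original file); it assumes
# the standard-library hashlib module as the hash factory:
#
#   >>> import hashlib
#   >>> h = HashAlgo(hashlib.sha256, b"hello ")
#   >>> h.update(b"world")
#   >>> h.hexdigest() == hashlib.sha256(b"hello world").hexdigest()
#   True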
| bsd-3-clause |
reddraggone9/youtube-dl | youtube_dl/extractor/infoq.py | 92 | 2315 | from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urlparse,
)
class InfoQIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?infoq\.com/(?:[^/]+/)+(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
'info_dict': {
'id': '12-jan-pythonthings',
'ext': 'mp4',
'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
'title': 'A Few of My Favorite [Python] Things',
},
}, {
'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
video_description = self._html_search_meta('description', webpage, 'description')
# The server URL is hardcoded
video_url = 'rtmpe://video.infoq.com/cfx/st/'
# Extract video URL
encoded_id = self._search_regex(
r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id')
real_id = compat_urllib_parse_unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
playpath = 'mp4:' + real_id
video_filename = playpath.split('/')[-1]
video_id, extension = video_filename.split('.')
http_base = self._search_regex(
r'EXPRESSINSTALL_SWF\s*=\s*[^"]*"((?:https?:)?//[^/"]+/)', webpage,
'HTTP base URL')
formats = [{
'format_id': 'rtmp',
'url': video_url,
'ext': extension,
'play_path': playpath,
}, {
'format_id': 'http',
'url': compat_urlparse.urljoin(url, http_base) + real_id,
}]
self._sort_formats(formats)
return {
'id': video_id,
'title': video_title,
'description': video_description,
'formats': formats,
}
| unlicense |
phoenixstar7/libsvm | tools/subset.py | 124 | 3202 | #!/usr/bin/env python
import os, sys, math, random
from collections import defaultdict
if sys.version_info[0] >= 3:
xrange = range
def exit_with_help(argv):
print("""\
Usage: {0} [options] dataset subset_size [output1] [output2]
This script randomly selects a subset of the dataset.
options:
-s method : method of selection (default 0)
0 -- stratified selection (classification only)
1 -- random selection
output1 : the subset (optional)
output2 : rest of the data (optional)
If output1 is omitted, the subset will be printed on the screen.""".format(argv[0]))
exit(1)
def process_options(argv):
argc = len(argv)
if argc < 3:
exit_with_help(argv)
# default method is stratified selection
method = 0
subset_file = sys.stdout
rest_file = None
i = 1
while i < argc:
if argv[i][0] != "-":
break
if argv[i] == "-s":
i = i + 1
method = int(argv[i])
if method not in [0,1]:
print("Unknown selection method {0}".format(method))
exit_with_help(argv)
i = i + 1
dataset = argv[i]
subset_size = int(argv[i+1])
if i+2 < argc:
subset_file = open(argv[i+2],'w')
if i+3 < argc:
rest_file = open(argv[i+3],'w')
return dataset, subset_size, method, subset_file, rest_file
def random_selection(dataset, subset_size):
l = sum(1 for line in open(dataset,'r'))
return sorted(random.sample(xrange(l), subset_size))
def stratified_selection(dataset, subset_size):
labels = [line.split(None,1)[0] for line in open(dataset)]
label_linenums = defaultdict(list)
for i, label in enumerate(labels):
label_linenums[label] += [i]
l = len(labels)
remaining = subset_size
ret = []
# classes with fewer data are sampled first; otherwise
# some rare classes may not be selected
for label in sorted(label_linenums, key=lambda x: len(label_linenums[x])):
linenums = label_linenums[label]
label_size = len(linenums)
# at least one instance per class
s = int(min(remaining, max(1, math.ceil(label_size*(float(subset_size)/l)))))
if s == 0:
sys.stderr.write('''\
Error: failed to have at least one instance per class
1. You may have regression data.
2. Your classification data is unbalanced or too small.
Please use -s 1.
''')
sys.exit(-1)
remaining -= s
ret += [linenums[i] for i in random.sample(xrange(label_size), s)]
return sorted(ret)
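# Worked example added for clarity (not part of the original script), using a hypothetical
# dataset of 100 lines (90 labelled "+1", 10 labelled "-1") and subset_size=20: the rarer
# class "-1" is sampled first with s = min(20, max(1, ceil(10 * 20/100))) = 2 lines, then
# "+1" gets min(18, max(1, ceil(90 * 20/100))) = 18 lines, so the subset keeps the
# original 9:1 class ratio.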
def main(argv=sys.argv):
dataset, subset_size, method, subset_file, rest_file = process_options(argv)
#uncomment the following line to fix the random seed
#random.seed(0)
selected_lines = []
if method == 0:
selected_lines = stratified_selection(dataset, subset_size)
elif method == 1:
selected_lines = random_selection(dataset, subset_size)
#select instances based on selected_lines
dataset = open(dataset,'r')
prev_selected_linenum = -1
for i in xrange(len(selected_lines)):
for cnt in xrange(selected_lines[i]-prev_selected_linenum-1):
line = dataset.readline()
if rest_file:
rest_file.write(line)
subset_file.write(dataset.readline())
prev_selected_linenum = selected_lines[i]
subset_file.close()
if rest_file:
for line in dataset:
rest_file.write(line)
rest_file.close()
dataset.close()
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
anaran/kuma | vendor/packages/translate/filters/test_prefilters.py | 33 | 1123 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""tests decoration handling functions that are used by checks"""
from translate.filters import prefilters
def test_removekdecomments():
assert prefilters.removekdecomments(u"Some sṱring") == u"Some sṱring"
assert prefilters.removekdecomments(u"_: Commenṱ\\n\nSome sṱring") == u"Some sṱring"
assert prefilters.removekdecomments(u"_: Commenṱ\\n\n") == u""
def test_filterwordswithpunctuation():
string = u"Nothing in here."
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == string
# test listed words (start / end with apostrophe)
string = u"'n Boom het 'n tak."
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == "n Boom het n tak."
# test words containing apostrophe
string = u"It's in it's own place."
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == "Its in its own place."
# test strings in unicode
string = u"Iṱ'š"
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == u"Iṱš"
| mpl-2.0 |
hpcloud-mon/tempest | tempest/api_schema/response/compute/version.py | 16 | 2153 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
version = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'version': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'href': {'type': 'string', 'format': 'uri'},
'rel': {'type': 'string'},
'type': {'type': 'string'}
},
'required': ['href', 'rel']
}
},
'media-types': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'base': {'type': 'string'},
'type': {'type': 'string'}
},
'required': ['base', 'type']
}
},
'status': {'type': 'string'},
'updated': {'type': 'string', 'format': 'date-time'}
},
'required': ['id', 'links', 'media-types', 'status', 'updated']
}
},
'required': ['version']
}
}
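# Illustrative response body that validates against the schema above (added for clarity;
# the values are hypothetical, not taken from a real deployment):
#
#   {"version": {"id": "v2.1",
#                "status": "CURRENT",
#                "updated": "2013-07-23T11:33:21Z",
#                "links": [{"href": "http://openstack.example.com/compute/v2.1/",
#                           "rel": "self"}],
#                "media-types": [{"base": "application/json",
#                                 "type": "application/vnd.openstack.compute+json;version=2.1"}]}}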
| apache-2.0 |
peiyuwang/pants | tests/python/pants_test/backend/python/tasks/interpreter_cache_test_mixin.py | 17 | 1196 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
class InterpreterCacheTestMixin(object):
"""A mixin to allow tests to use the "real" interpreter cache.
This is so each test doesn't waste huge amounts of time recreating the cache on each run.
Note: Must be mixed in to a subclass of BaseTest.
"""
def setUp(self):
super(InterpreterCacheTestMixin, self).setUp()
# It would be nice to get the location of the real interpreter cache from PythonSetup,
# but unfortunately real subsystems aren't available here (for example, we have no access
# to the enclosing pants instance's options), so we have to hard-code it.
python_setup_workdir = os.path.join(self.real_build_root, '.pants.d', 'python-setup')
self.set_options_for_scope('python-setup',
interpreter_cache_dir=os.path.join(python_setup_workdir, 'interpreters'),
chroot_cache_dir=os.path.join(python_setup_workdir, 'chroots'))
| apache-2.0 |
implemento/domino | app/app/urls.py | 1 | 1107 | from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls import url, include
from django.contrib.auth.models import User
from rest_framework import routers, serializers, viewsets
# Serializers define the API representation.
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'is_staff')
# ViewSets define the view behavior.
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^', include('domino.urls')),
url(r'^api/v1/', include(router.urls)),
url(r'^domino/', include('domino.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
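# Note added for clarity (not part of the original file): with the DefaultRouter above,
# the browsable API root is exposed at /api/v1/ and the registered UserViewSet is served
# at /api/v1/users/ (list/create) and /api/v1/users/<pk>/ (detail), alongside the
# session-login views under /api-auth/.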
| gpl-3.0 |
dimmddr/roadSignsNN | prepare_images.py | 1 | 8513 | import cv2
import matplotlib.pyplot as plt
import numpy as np
from numpy.lib.stride_tricks import as_strided
import nn
from settings import COVER_PERCENT
IMG_WIDTH = 1025
IMG_HEIGHT = 523
IMG_LAYERS = 3
SUB_IMG_WIDTH = 48
SUB_IMG_HEIGHT = 48
SUB_IMG_LAYERS = 3
WIDTH = 2
HEIGHT = 1
LAYERS = 0
XMIN = 0
YMIN = 1
XMAX = 2
YMAX = 3
# TODO: rewrite everything either using the Rectangle namedtuple or via numpy, e.g. with a recarray
def compute_covering(window, label):
dx = min(window.xmax, label.xmax) - max(window.xmin, label.xmin)
dy = min(window.ymax, label.ymax) - max(window.ymin, label.ymin)
if (dx >= 0) and (dy >= 0):
label_cover = dx * dy / ((label.xmax - label.xmin) * (label.ymax - label.ymin))
window_cover = dx * dy / ((window.xmax - window.xmin) * (window.ymax - window.ymin))
return max(label_cover, window_cover)
else:
return 0
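# Worked example added for clarity (not part of the original module), assuming Python 3
# true division (or float coordinates):
#   window = nn.Rectangle(0, 0, 48, 48)
#   label = nn.Rectangle(24, 24, 72, 72)
#   dx = dy = 24, intersection = 576, both areas = 2304,
#   so compute_covering(window, label) == 0.25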
def split_into_subimgs(img, sub_img_shape, debug, step=1):
shape = (int(np.floor((img.shape[HEIGHT] - sub_img_shape[HEIGHT]) / step)),
int(np.floor((img.shape[WIDTH] - sub_img_shape[WIDTH]) / step)),
SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH)
# shape = (lbl_array.shape[0], SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH)
result_array = as_strided(img, shape=shape,
strides=(
img.strides[1] * step + (img.shape[WIDTH] - sub_img_shape[WIDTH]) % step *
img.strides[2],
img.strides[2] * step,
img.strides[0], img.strides[1], img.strides[2]))
return result_array
def get_labels(labels, result_array_shape, step, sub_img_shape):
lbl_array = np.zeros(shape=(result_array_shape[0], result_array_shape[1]))
index = 0
for i in range(lbl_array.shape[0]):
for ii in range(lbl_array.shape[1]):
# Rectangle = namedtuple('Rectangle', ['xmin', 'ymin', 'xmax', 'ymax'])
window = nn.Rectangle(ii * step, i * step, ii * step + sub_img_shape[HEIGHT],
i * step + sub_img_shape[WIDTH])
cover = np.array([compute_covering(window=window,
label=nn.Rectangle(lbl[0], lbl[1], lbl[2], lbl[3])) for lbl in labels])
is_cover = int(np.any(cover > COVER_PERCENT))
lbl_array[i, ii] = is_cover
index += 1
return lbl_array
def prepare(img_path, labels, debug=False):
step = 2
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
if debug:
print("Prepare image " + img_path)
print(img.shape)
print(labels)
res_img = img / 255
res_img = np.array([res_img[:, :, 0], res_img[:, :, 1], res_img[:, :, 2]])
res = split_into_subimgs(res_img, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH),
step=step, debug=debug)
lbl_res = get_labels(labels=labels, result_array_shape=res.shape,
step=step, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH))
return res, lbl_res
def prepare_calibration(img_path, labels, debug=False):
# Returns labels in the form (yn, xn, wn, hn) for calibrating the image bounding box:
# if (x, y) are the coordinates of the top-left corner and (w, h) are the width and height,
# the new box will be (x - xn * w / wn, y - yn * h / hn), (w / wn, h / hn)
"""
:param img_path:
:param labels:
:param debug:
:return:
@note: The first net should rescale the image within [16, 64], the second within [8, 128].
Since the initial window is 32x32, the maximum correction factor should be 2 and the minimum 0.5.
Using three classes each for width and height is convenient but hardly practical; it is worth trying at least 5.
An odd number is convenient so that three options are available: maximum, minimum, and keep as is.
The resulting multipliers are: [1/2, 3/4, 1, 6/4, 2],
and the corresponding sizes for the initial 32 are: [16, 24, 32, 48, 64]
"""
step = 2
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
if debug:
print("Prepare image " + img_path)
print(img.shape)
print(labels)
res_img = img / 255
res_img = np.array([res_img[:, :, 0], res_img[:, :, 1], res_img[:, :, 2]])
res = split_into_subimgs(res_img, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH),
step=step, debug=debug)
lbl_res = get_labels(labels=labels, result_array_shape=res.shape,
step=step, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH))
# TODO: decide what to do in the case of several signs -
# we need to somehow obtain the coordinates of the right one
xmin, ymin, xmax, ymax = labels[0]
for image in res[lbl_res == 1]:
pass
# We need to extract the image coordinates from the array of images and the array of their labels.
# The width and height are known and identical during this initial preparation.
# The coordinates can be derived from the image index; we only need to get that index
return res, lbl_res
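# Worked example of the calibration arithmetic described above (added for clarity; the
# numbers are hypothetical): for a detected box with top-left (x, y) = (100, 50) and size
# (w, h) = (32, 32), a calibration label (yn, xn, wn, hn) = (0, 0, 0.5, 0.5) keeps the
# corner at (100, 50) and rescales the box to (w / wn, h / hn) = (64, 64).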
def show_sign(img_path, lbl):
print(img_path)
print(lbl)
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
cv2.imshow("img", img[lbl[1]:lbl[3], lbl[0]:lbl[2], :])
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.rectangle(img, (lbl[0], lbl[1]), (lbl[2], lbl[3]), 2)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def show_roi(roi_list):
for roi in roi_list:
(r, g, b) = (roi[0], roi[1], roi[2])
roi = cv2.merge((r, g, b))
cv2.imshow("img", roi)
cv2.waitKey(0)
cv2.destroyAllWindows()
def show_rectangles(filename, rectangles_list, show_type='matplotlib'):
img = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
for rect in rectangles_list:
if rect is not None:
cv2.rectangle(img, (rect[XMIN], rect[YMIN]), (rect[XMAX], rect[YMAX]), (0, 255, 0), 1)
if show_type == 'matplotlib':
(b, g, r) = cv2.split(img)
img = cv2.merge((r, g, b))
plt.imshow(img)
plt.show()
else:
cv2.imshow(filename, img)
cv2.waitKey()
# TODO: add saving to a separate directory
def save_img_with_rectangles(dataset_path, filename, rectangles_list):
img = cv2.imread(dataset_path + filename, cv2.IMREAD_UNCHANGED)
for rect in rectangles_list:
if rect is not None:
cv2.rectangle(img, (rect[XMIN], rect[YMIN]), (rect[XMAX], rect[YMAX]), (0, 255, 0), 1)
cv2.imwrite(dataset_path + "results/" + filename + "_with_rects.jpg", img)
# Probably temp function before I fix localization
def get_roi_from_images(images, img_path):
res_roi = []
res_label = []
label_dict = dict()
for image in images:
img = cv2.imread(img_path + image.filename.decode('utf8'), cv2.IMREAD_UNCHANGED)
for sign in image.signs:
if sign.label not in label_dict:
label_dict[sign.label] = len(label_dict)
(x1, y1, x2, y2) = sign.coord
roi = img[y1:y2, x1:x2, :]
res_roi.append(np.array([roi[:, :, 0], roi[:, :, 1], roi[:, :, 2]]))
res_label.append(label_dict[sign.label])
return res_roi, res_label, label_dict
def create_synthetic_data(imgs):
# Create array of size mods [1, 4], step = 0.5
sizes = np.arange(start=1, stop=4.5, step=0.5)
total = imgs.shape[0] * sizes.shape[0] * 2 # *2
res = []
return imgs
| mit |
ZHAW-INES/rioxo-uClinux-dist | user/python/python-2.4.4/Lib/test/test_operations.py | 5 | 2028 | # Python test set -- part 3, built-in operations.
print '3. Operations'
print 'XXX Mostly not yet implemented'
print '3.1 Dictionary lookups succeed even if __cmp__() raises an exception'
class BadDictKey:
already_printed_raising_error = 0
def __hash__(self):
return hash(self.__class__)
def __cmp__(self, other):
if isinstance(other, self.__class__):
if not BadDictKey.already_printed_raising_error:
# How many times __cmp__ gets called depends on the hash
# code and the internals of the dict implementation; we
# know it will be called at least once, but that's it.
# already_printed_raising_error makes sure the expected-
# output file prints the msg at most once.
BadDictKey.already_printed_raising_error = 1
print "raising error"
raise RuntimeError, "gotcha"
return other
d = {}
x1 = BadDictKey()
x2 = BadDictKey()
d[x1] = 1
d[x2] = 2
print "No exception passed through."
# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
# This version got an assert failure in debug build, infinite loop in
# release build. Unfortunately, provoking this kind of stuff requires
# a mix of inserts and deletes hitting exactly the right hash codes in
# exactly the right order, and I can't think of a randomized approach
# that would be *likely* to hit a failing case in reasonable time.
d = {}
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
# Another dict resizing bug (SF bug #1456209).
# This caused Segmentation faults or Illegal instructions.
class X(object):
def __hash__(self):
return 5
def __eq__(self, other):
if resizing:
d.clear()
return False
d = {}
resizing = False
d[X()] = 1
d[X()] = 2
d[X()] = 3
d[X()] = 4
d[X()] = 5
# now trigger a resize
resizing = True
d[9] = 6
print 'resize bugs not triggered.'
| gpl-2.0 |
eric-haibin-lin/mxnet | example/profiler/profiler_ndarray.py | 27 | 11345 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import mxnet as mx
import numpy as np
import pickle as pkl
def _np_reduce(dat, axis, keepdims, numpy_reduce_func):
if isinstance(axis, int):
axis = [axis]
else:
axis = list(axis) if axis is not None else range(len(dat.shape))
ret = dat
for i in reversed(sorted(axis)):
ret = numpy_reduce_func(ret, axis=i)
if keepdims:
keepdims_shape = list(dat.shape)
for i in axis:
keepdims_shape[i] = 1
ret = ret.reshape(tuple(keepdims_shape))
return ret
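# Illustrative check added for clarity (not part of the original file): for a 2x3 array,
# _np_reduce(dat, axis=(1,), keepdims=True, numpy_reduce_func=np.sum) reduces axis 1 and
# reshapes the result back to (2, 1), matching np.sum(dat, axis=1, keepdims=True).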
def reldiff(a, b):
diff = np.abs(a - b)
norm = np.abs(a)
reldiff = np.max(diff / (norm + 1e-7))
return reldiff
def same(a, b):
return np.sum(a != b) == 0
def check_with_uniform(uf, arg_shapes, dim=None, npuf=None, rmin=-10, type_list=[np.float32]):
"""check function consistency with uniform random numbers"""
if isinstance(arg_shapes, int):
assert dim
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
arg_shapes = [shape] * arg_shapes
for dtype in type_list:
ndarray_arg = []
numpy_arg = []
for s in arg_shapes:
npy = np.random.uniform(rmin, 10, s).astype(dtype)
narr = mx.nd.array(npy, dtype=dtype)
ndarray_arg.append(narr)
numpy_arg.append(npy)
out1 = uf(*ndarray_arg)
if npuf is None:
out2 = uf(*numpy_arg).astype(dtype)
else:
out2 = npuf(*numpy_arg).astype(dtype)
assert out1.shape == out2.shape
if isinstance(out1, mx.nd.NDArray):
out1 = out1.asnumpy()
if dtype == np.float16:
assert reldiff(out1, out2) < 2e-3
else:
assert reldiff(out1, out2) < 1e-6
def random_ndarray(dim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
data = mx.nd.array(np.random.uniform(-10, 10, shape))
return data
def test_ndarray_elementwise():
np.random.seed(0)
nrepeat = 10
maxdim = 4
all_type = [np.float32, np.float64, np.float16, np.uint8, np.int32]
real_type = [np.float32, np.float64, np.float16]
for repeat in range(nrepeat):
for dim in range(1, maxdim):
check_with_uniform(lambda x, y: x + y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x - y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x * y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x / y, 2, dim, type_list=real_type)
check_with_uniform(lambda x, y: x / y, 2, dim, rmin=1, type_list=all_type)
check_with_uniform(mx.nd.sqrt, 1, dim, np.sqrt, rmin=0)
check_with_uniform(mx.nd.square, 1, dim, np.square, rmin=0)
check_with_uniform(lambda x: mx.nd.norm(x).asscalar(), 1, dim, np.linalg.norm)
def test_ndarray_negate():
npy = np.random.uniform(-10, 10, (2,3,4))
arr = mx.nd.array(npy)
assert reldiff(npy, arr.asnumpy()) < 1e-6
assert reldiff(-npy, (-arr).asnumpy()) < 1e-6
# a final check to make sure the negation (-) is not implemented
# as inplace operation, so the contents of arr does not change after
# we compute (-arr)
assert reldiff(npy, arr.asnumpy()) < 1e-6
def test_ndarray_choose():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
assert same(npy[np.arange(shape[0]), indices],
mx.nd.choose_element_0index(arr, mx.nd.array(indices)).asnumpy())
def test_ndarray_fill():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
new_npy = npy.copy()
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
val = np.random.randint(shape[1], size=shape[0])
new_npy[:] = npy
new_npy[np.arange(shape[0]), indices] = val
assert same(new_npy,
mx.nd.fill_element_0index(arr, mx.nd.array(val), mx.nd.array(indices)).asnumpy())
def test_ndarray_onehot():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
npy[:] = 0.0
npy[np.arange(shape[0]), indices] = 1.0
mx.nd.onehot_encode(mx.nd.array(indices), out=arr)
assert same(npy, arr.asnumpy())
def test_ndarray_copy():
c = mx.nd.array(np.random.uniform(-10, 10, (10, 10)))
d = c.copyto(mx.Context('cpu', 0))
assert np.sum(np.abs(c.asnumpy() != d.asnumpy())) == 0.0
def test_ndarray_scalar():
c = mx.nd.empty((10,10))
d = mx.nd.empty((10,10))
c[:] = 0.5
d[:] = 1.0
d -= c * 2 / 3 * 6.0
c += 0.5
assert(np.sum(c.asnumpy()) - 100 < 1e-5)
assert(np.sum(d.asnumpy()) + 100 < 1e-5)
c[:] = 2
assert(np.sum(c.asnumpy()) - 200 < 1e-5)
d = -c + 2
assert(np.sum(d.asnumpy()) < 1e-5)
def test_ndarray_pickle():
np.random.seed(0)
maxdim = 5
nrepeat = 10
for repeat in range(nrepeat):
for dim in range(1, maxdim):
a = random_ndarray(dim)
b = mx.nd.empty(a.shape)
a[:] = np.random.uniform(-10, 10, a.shape)
b[:] = np.random.uniform(-10, 10, a.shape)
a = a + b
data = pkl.dumps(a)
a2 = pkl.loads(data)
assert np.sum(a.asnumpy() != a2.asnumpy()) == 0
def test_ndarray_saveload():
np.random.seed(0)
maxdim = 5
nrepeat = 10
fname = 'tmp_list.bin'
for repeat in range(nrepeat):
data = []
for i in range(10):
data.append(random_ndarray(np.random.randint(1, 5)))
mx.nd.save(fname, data)
data2 = mx.nd.load(fname)
assert len(data) == len(data2)
for x, y in zip(data, data2):
assert np.sum(x.asnumpy() != y.asnumpy()) == 0
dmap = {'ndarray xx %s' % i : x for i, x in enumerate(data)}
mx.nd.save(fname, dmap)
dmap2 = mx.nd.load(fname)
assert len(dmap2) == len(dmap)
for k, x in dmap.items():
y = dmap2[k]
assert np.sum(x.asnumpy() != y.asnumpy()) == 0
os.remove(fname)
def test_ndarray_slice():
shape = (10,)
A = mx.nd.array(np.random.uniform(-10, 10, shape))
A2 = A.asnumpy()
assert same(A[3:8].asnumpy(), A2[3:8])
A2[3:8] *= 10;
A[3:8] = A2[3:8]
assert same(A[3:8].asnumpy(), A2[3:8])
def test_ndarray_slice_along_axis():
arr = mx.nd.array(np.random.uniform(-10, 10, (3, 4, 2, 3)))
sub_arr = arr.slice(begin=(None, 1), end=(None, 3))
# test we sliced correctly
assert same(arr.asnumpy()[:, 1:3, :, :], sub_arr.asnumpy())
# test that slice is copy, instead of shared memory
sub_arr[:] = 0
assert not same(arr.asnumpy()[:, 1:3, :, :], sub_arr.asnumpy())
def test_clip():
shape = (10,)
A = mx.random.uniform(-10, 10, shape)
B = mx.nd.clip(A, -2, 2)
B1 = B.asnumpy()
for i in range(shape[0]):
assert B1[i] >= -2
assert B1[i] <= 2
def test_dot():
a = np.random.uniform(-3, 3, (3, 4))
b = np.random.uniform(-3, 3, (4, 5))
c = np.dot(a, b)
A = mx.nd.array(a)
B = mx.nd.array(b)
C = mx.nd.dot(A, B)
assert reldiff(c, C.asnumpy()) < 1e-5
def test_reduce():
sample_num = 200
def test_reduce_inner(numpy_reduce_func, nd_reduce_func):
for i in range(sample_num):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 11, size=ndim)
axis_flags = np.random.randint(0, 2, size=ndim)
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
keepdims = np.random.randint(0, 2)
dat = np.random.rand(*shape) - 0.5
if 0 == len(axes):
axes = tuple(range(ndim))
else:
axes = tuple(axes)
numpy_ret = numpy_reduce_func(dat, axis=axes, keepdims=keepdims)
ndarray_ret = nd_reduce_func(mx.nd.array(dat), axis=axes, keepdims=keepdims)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == numpy_ret.shape) or \
(ndarray_ret.shape == (1,) and numpy_ret.shape == ()), "nd:%s, numpy:%s" \
%(ndarray_ret.shape, numpy_ret.shape)
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-4
test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.sum),
mx.nd.sum)
test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.max),
mx.nd.max)
test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.min),
mx.nd.min)
def test_broadcast():
sample_num = 1000
def test_broadcast_to():
for i in range(sample_num):
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 11, size=ndim)
shape = target_shape.copy()
axis_flags = np.random.randint(0, 2, size=ndim)
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
shape[axis] = 1
dat = np.random.rand(*shape) - 0.5
numpy_ret = dat
ndarray_ret = mx.nd.array(dat).broadcast_to(shape=target_shape)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == target_shape).all()
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-8
test_broadcast_to()
if __name__ == '__main__':
mx.profiler.set_config(profile_all=True, filename='profile_ndarray.json')
mx.profiler.set_state('run')
test_ndarray_slice_along_axis()
test_broadcast()
test_ndarray_elementwise()
test_ndarray_slice()
test_ndarray_pickle()
test_ndarray_saveload()
test_ndarray_copy()
test_ndarray_negate()
test_ndarray_scalar()
test_clip()
test_dot()
test_ndarray_choose()
test_ndarray_onehot()
test_ndarray_fill()
test_reduce()
mx.profiler.set_state('stop')
| apache-2.0 |
coreos/autotest | client/setup.py | 3 | 2278 | from distutils.core import setup
import os
try:
import autotest.common as common
except ImportError:
import common
from autotest.client.shared import version
# Mostly needed when called one level up
if os.path.isdir('client'):
client_dir = 'client'
else:
client_dir = '.'
autotest_dir = os.path.join(client_dir, "..")
def _get_files(path):
'''
Given a path, return all the files in there to package
'''
flist=[]
for root, _, files in sorted(os.walk(path)):
for name in files:
fullname = os.path.join(root, name)
flist.append(fullname)
return flist
def get_filelist():
pd_filelist=['config/*' ]
pd_filelist.extend(_get_files(os.path.join(client_dir, 'profilers')))
pd_filelist.extend(_get_files(os.path.join(client_dir, 'tools')))
return pd_filelist
def get_packages():
return ['autotest.client.shared',
'autotest.client.shared.hosts',
'autotest.client.shared.test_utils',
'autotest.client.net',
'autotest.client.tools',
'autotest.client.profilers',
'autotest.client',
'autotest']
def get_scripts():
return [os.path.join(client_dir, 'autotest-local'),
os.path.join(client_dir, 'autotest-local-streamhandler'),
os.path.join(client_dir, 'autotest-daemon'),
os.path.join(client_dir, 'autotest-daemon-monitor')]
def get_data_files():
return [('/etc/autotest', [autotest_dir + '/global_config.ini',
autotest_dir + '/shadow_config.ini',]),]
def get_package_dir():
return {'autotest.client': client_dir, 'autotest' : autotest_dir}
def get_package_data():
return {'autotest.client' : get_filelist()}
def run():
setup(name='autotest',
description='Autotest test framework - local module',
maintainer='Lucas Meneghel Rodrigues',
author_email='[email protected]',
version=version.get_version(),
url='http://autotest.github.com',
package_dir=get_package_dir(),
package_data=get_package_data(),
packages= get_packages(),
scripts=get_scripts(),
data_files=get_data_files())
if __name__ == '__main__':
run()
| gpl-2.0 |
throoze/SongStore | plugins/ti.alloy/plugin.py | 1729 | 5251 | import os, sys, subprocess, hashlib
import subprocess
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def compile(config):
paths = {}
binaries = ["alloy","node"]
dotAlloy = os.path.abspath(os.path.join(config['project_dir'], 'build', '.alloynewcli'))
if os.path.exists(dotAlloy):
print "[DEBUG] build/.alloynewcli file found, skipping plugin..."
os.remove(dotAlloy)
else:
for binary in binaries:
try:
# see if the environment variable is defined
paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
except KeyError as ex:
# next try PATH, and then our guess paths
if sys.platform == "darwin" or sys.platform.startswith('linux'):
userPath = os.environ["HOME"]
guessPaths = [
"/usr/local/bin/"+binary,
"/opt/local/bin/"+binary,
userPath+"/local/bin/"+binary,
"/opt/bin/"+binary,
"/usr/bin/"+binary,
"/usr/local/share/npm/bin/"+binary
]
try:
binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
except:
print "[WARN] Couldn't find %s on your PATH:" % binary
print "[WARN] %s" % os.environ["PATH"]
print "[WARN]"
print "[WARN] Checking for %s in a few default locations:" % binary
for p in guessPaths:
sys.stdout.write("[WARN] %s -> " % p)
if os.path.exists(p):
binaryPath = p
print "FOUND"
break
else:
print "not found"
binaryPath = None
if binaryPath is None:
print "[ERROR] Couldn't find %s" % binary
sys.exit(1)
else:
paths[binary] = binaryPath
# no guesses on windows, just use the PATH
elif sys.platform == "win32":
paths["alloy"] = "alloy.cmd"
f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
if os.path.exists(f):
print "[INFO] alloy app found at %s" % f
rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
devicefamily = 'none'
simtype = 'none'
version = '0'
deploytype = 'development'
if config['platform']==u'ios':
version = config['iphone_version']
devicefamily = config['devicefamily']
deploytype = config['deploytype']
if config['platform']==u'android':
builder = config['android_builder']
version = builder.tool_api_level
deploytype = config['deploy_type']
if config['platform']==u'mobileweb':
builder = config['mobileweb_builder']
deploytype = config['deploytype']
cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
if sys.platform == "win32":
cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
else:
cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
print "[INFO] Executing Alloy compile:"
print "[INFO] %s" % " ".join(cmd)
try:
print check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if hasattr(ex, 'output'):
print ex.output
print "[ERROR] Alloy compile failed"
retcode = 1
if hasattr(ex, 'returncode'):
retcode = ex.returncode
sys.exit(retcode)
except EnvironmentError as ex:
print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
sys.exit(2)
| apache-2.0 |
leafjungle/luigi | luigi/mock.py | 48 | 5473 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides a class :class:`MockTarget`, an implementation of :py:class:`~luigi.target.Target`.
:class:`MockTarget` contains all data in-memory.
The main purpose is unit testing workflows without writing to disk.
"""
import multiprocessing
from io import BytesIO
import sys
import warnings
from luigi import six
from luigi import target
from luigi.format import get_default_format, MixedUnicodeBytes
class MockFileSystem(target.FileSystem):
"""
MockFileSystem inspects/modifies _data to simulate file system operations.
"""
_data = None
def get_all_data(self):
# This starts a server in the background, so we don't want to do it in the global scope
if MockFileSystem._data is None:
MockFileSystem._data = multiprocessing.Manager().dict()
return MockFileSystem._data
def get_data(self, fn):
return self.get_all_data()[fn]
def exists(self, path):
return MockTarget(path).exists()
def remove(self, path, recursive=True, skip_trash=True):
"""
Removes the given mockfile. skip_trash doesn't have any meaning.
"""
if recursive:
to_delete = []
for s in self.get_all_data().keys():
if s.startswith(path):
to_delete.append(s)
for s in to_delete:
self.get_all_data().pop(s)
else:
self.get_all_data().pop(path)
def listdir(self, path):
"""
listdir does a prefix match of self.get_all_data(), but doesn't yet support globs.
"""
return [s for s in self.get_all_data().keys()
if s.startswith(path)]
def isdir(self, path):
return any(self.listdir(path))
def mkdir(self, path, parents=True, raise_if_exists=False):
"""
mkdir is a noop.
"""
pass
def clear(self):
self.get_all_data().clear()
class MockTarget(target.FileSystemTarget):
fs = MockFileSystem()
def __init__(self, fn, is_tmp=None, mirror_on_stderr=False, format=None):
self._mirror_on_stderr = mirror_on_stderr
self._fn = fn
if format is None:
format = get_default_format()
# Allow to write unicode in file for retrocompatibility
if six.PY2:
format = format >> MixedUnicodeBytes
self.format = format
def exists(self,):
return self._fn in self.fs.get_all_data()
def rename(self, path, raise_if_exists=False):
if raise_if_exists and path in self.fs.get_all_data():
raise RuntimeError('Destination exists: %s' % path)
contents = self.fs.get_all_data().pop(self._fn)
self.fs.get_all_data()[path] = contents
@property
def path(self):
return self._fn
def open(self, mode):
fn = self._fn
mock_target = self
class Buffer(BytesIO):
# Just to be able to do writing + reading from the same buffer
_write_line = True
def set_wrapper(self, wrapper):
self.wrapper = wrapper
def write(self, data):
if six.PY3:
stderrbytes = sys.stderr.buffer
else:
stderrbytes = sys.stderr
if mock_target._mirror_on_stderr:
if self._write_line:
sys.stderr.write(fn + ": ")
stderrbytes.write(data)
if (data[-1]) == '\n':
self._write_line = True
else:
self._write_line = False
super(Buffer, self).write(data)
def close(self):
if mode == 'w':
try:
mock_target.wrapper.flush()
except AttributeError:
pass
mock_target.fs.get_all_data()[fn] = self.getvalue()
super(Buffer, self).close()
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
self.close()
def __enter__(self):
return self
def readable(self):
return mode == 'r'
def writeable(self):
return mode == 'w'
def seekable(self):
return False
if mode == 'w':
wrapper = self.format.pipe_writer(Buffer())
wrapper.set_wrapper(wrapper)
return wrapper
else:
return self.format.pipe_reader(Buffer(self.fs.get_all_data()[fn]))
class MockFile(MockTarget):
def __init__(self, *args, **kwargs):
warnings.warn("MockFile has been renamed MockTarget", DeprecationWarning, stacklevel=2)
super(MockFile, self).__init__(*args, **kwargs)
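# Minimal usage sketch added for clarity (not part of the original module):
#
#   >>> fs = MockFileSystem()
#   >>> fs.get_all_data()['some/path'] = b'contents'
#   >>> fs.exists('some/path')
#   True
#   >>> fs.listdir('some/')
#   ['some/path']
#
# MockTarget.open('w') stores its data in the same in-memory dict when the handle is
# closed, so no files ever touch the local disk.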
| apache-2.0 |
jsrudani/HadoopHDFSProject | dev-support/relnotes.py | 62 | 7865 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from optparse import OptionParser
import httplib
import urllib
import cgi
try:
import json
except ImportError:
import simplejson as json
namePattern = re.compile(r' \([0-9]+\)')
def clean(str):
return quoteHtml(re.sub(namePattern, "", str))
def formatComponents(str):
str = re.sub(namePattern, '', str).replace("'", "")
if str != "":
ret = "(" + str + ")"
else:
ret = ""
return quoteHtml(ret)
def quoteHtml(str):
return cgi.escape(str).encode('ascii', 'xmlcharrefreplace')
def mstr(obj):
if (obj == None):
return ""
return unicode(obj)
class Version:
"""Represents a version number"""
def __init__(self, data):
self.mod = False
self.data = data
found = re.match('^((\d+)(\.\d+)*).*$', data)
if (found):
self.parts = [ int(p) for p in found.group(1).split('.') ]
else:
self.parts = []
# backfill version with zeroes if missing parts
self.parts.extend((0,) * (3 - len(self.parts)))
def decBugFix(self):
self.mod = True
self.parts[2] -= 1
return self
def __str__(self):
if (self.mod):
return '.'.join([ str(p) for p in self.parts ])
return self.data
def __cmp__(self, other):
return cmp(self.parts, other.parts)
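# Illustrative behaviour added for clarity (not part of the original script):
#   >>> str(Version('2.0.3').decBugFix())
#   '2.0.2'
# which is how the script guesses previousVer when it is not supplied on the command line.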
class Jira:
"""A single JIRA"""
def __init__(self, data, parent):
self.key = data['key']
self.fields = data['fields']
self.parent = parent
self.notes = None
def getId(self):
return mstr(self.key)
def getDescription(self):
return mstr(self.fields['description'])
def getReleaseNote(self):
if (self.notes == None):
field = self.parent.fieldIdMap['Release Note']
if (self.fields.has_key(field)):
self.notes=mstr(self.fields[field])
else:
self.notes=self.getDescription()
return self.notes
def getPriority(self):
ret = ""
pri = self.fields['priority']
if(pri != None):
ret = pri['name']
return mstr(ret)
def getAssignee(self):
ret = ""
mid = self.fields['assignee']
if(mid != None):
ret = mid['displayName']
return mstr(ret)
def getComponents(self):
return " , ".join([ comp['name'] for comp in self.fields['components'] ])
def getSummary(self):
return self.fields['summary']
def getType(self):
ret = ""
mid = self.fields['issuetype']
if(mid != None):
ret = mid['name']
return mstr(ret)
def getReporter(self):
ret = ""
mid = self.fields['reporter']
if(mid != None):
ret = mid['displayName']
return mstr(ret)
def getProject(self):
ret = ""
mid = self.fields['project']
if(mid != None):
ret = mid['key']
return mstr(ret)
class JiraIter:
"""An Iterator of JIRAs"""
def __init__(self, versions):
self.versions = versions
resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/field")
data = json.loads(resp.read())
self.fieldIdMap = {}
for part in data:
self.fieldIdMap[part['name']] = part['id']
self.jiras = []
at=0
end=1
count=100
while (at < end):
params = urllib.urlencode({'jql': "project in (HADOOP,HDFS,MAPREDUCE,YARN) and fixVersion in ('"+"' , '".join(versions)+"') and resolution = Fixed", 'startAt':at, 'maxResults':count})
resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params)
data = json.loads(resp.read())
if (data.has_key('errorMessages')):
raise Exception(data['errorMessages'])
at = data['startAt'] + data['maxResults']
end = data['total']
self.jiras.extend(data['issues'])
self.iter = self.jiras.__iter__()
def __iter__(self):
return self
def next(self):
data = self.iter.next()
j = Jira(data, self)
return j
class Outputs:
"""Several different files to output to at the same time"""
def __init__(self, base_file_name, file_name_pattern, keys, params={}):
self.params = params
self.base = open(base_file_name%params, 'w')
self.others = {}
for key in keys:
both = dict(params)
both['key'] = key
self.others[key] = open(file_name_pattern%both, 'w')
def writeAll(self, pattern):
both = dict(self.params)
both['key'] = ''
self.base.write(pattern%both)
for key in self.others.keys():
both = dict(self.params)
both['key'] = key
self.others[key].write(pattern%both)
def writeKeyRaw(self, key, str):
self.base.write(str)
if (self.others.has_key(key)):
self.others[key].write(str)
def close(self):
self.base.close()
for fd in self.others.values():
fd.close()
def main():
parser = OptionParser(usage="usage: %prog [options] [USER-ignored] [PASSWORD-ignored] [VERSION]")
parser.add_option("-v", "--version", dest="versions",
action="append", type="string",
help="versions in JIRA to include in releasenotes", metavar="VERSION")
parser.add_option("--previousVer", dest="previousVer",
action="store", type="string",
help="previous version to include in releasenotes", metavar="VERSION")
(options, args) = parser.parse_args()
if (options.versions == None):
options.versions = []
if (len(args) > 2):
options.versions.append(args[2])
if (len(options.versions) <= 0):
parser.error("At least one version needs to be supplied")
versions = [ Version(v) for v in options.versions];
versions.sort();
maxVersion = str(versions[-1])
if(options.previousVer == None):
options.previousVer = str(versions[0].decBugFix())
print >> sys.stderr, "WARNING: no previousVersion given, guessing it is "+options.previousVer
list = JiraIter(options.versions)
version = maxVersion
outputs = Outputs("releasenotes.%(ver)s.html",
"releasenotes.%(key)s.%(ver)s.html",
["HADOOP","HDFS","MAPREDUCE","YARN"], {"ver":maxVersion, "previousVer":options.previousVer})
head = '<META http-equiv="Content-Type" content="text/html; charset=UTF-8">\n' \
'<title>Hadoop %(key)s %(ver)s Release Notes</title>\n' \
'<STYLE type="text/css">\n' \
' H1 {font-family: sans-serif}\n' \
' H2 {font-family: sans-serif; margin-left: 7mm}\n' \
' TABLE {margin-left: 7mm}\n' \
'</STYLE>\n' \
'</head>\n' \
'<body>\n' \
'<h1>Hadoop %(key)s %(ver)s Release Notes</h1>\n' \
'These release notes include new developer and user-facing incompatibilities, features, and major improvements. \n' \
'<a name="changes"/>\n' \
'<h2>Changes since Hadoop %(previousVer)s</h2>\n' \
'<ul>\n'
outputs.writeAll(head)
for jira in list:
line = '<li> <a href="https://issues.apache.org/jira/browse/%s">%s</a>.\n' \
' %s %s reported by %s and fixed by %s %s<br>\n' \
' <b>%s</b><br>\n' \
' <blockquote>%s</blockquote></li>\n' \
% (quoteHtml(jira.getId()), quoteHtml(jira.getId()), clean(jira.getPriority()), clean(jira.getType()).lower(),
quoteHtml(jira.getReporter()), quoteHtml(jira.getAssignee()), formatComponents(jira.getComponents()),
quoteHtml(jira.getSummary()), quoteHtml(jira.getReleaseNote()))
outputs.writeKeyRaw(jira.getProject(), line)
outputs.writeAll("</ul>\n</body></html>\n")
outputs.close()
if __name__ == "__main__":
main()
| apache-2.0 |
wlach/treeherder | treeherder/model/error_summary.py | 2 | 8237 | import json
import logging
import re
from django.conf import settings
from django.core.urlresolvers import reverse
logger = logging.getLogger(__name__)
LEAK_RE = re.compile(r'\d+ bytes leaked \((.+)\)$')
CRASH_RE = re.compile(r'.+ application crashed \[@ (.+)\]$')
MOZHARNESS_RE = re.compile(
r'^\d+:\d+:\d+[ ]+(?:DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL) - [ ]?'
)
def get_error_summary(all_errors):
"""
Transform the error lines into the artifact format.
Add bug suggestions if they are found.
"""
error_summary = []
bugscache_uri = '{0}{1}'.format(
settings.API_HOSTNAME,
reverse("bugscache-list")
)
terms_requested = {}
for err in all_errors:
# remove the mozharness prefix
clean_line = get_mozharness_substring(err['line'])
search_terms = []
# get a meaningful search term out of the error line
search_term = get_error_search_term(clean_line)
bugs = dict(open_recent=[], all_others=[])
# collect open recent and all other bugs suggestions
if search_term:
search_terms.append(search_term)
if search_term not in terms_requested:
# retrieve the list of suggestions from the api
bugs = get_bugs_for_search_term(
search_term,
bugscache_uri
)
terms_requested[search_term] = bugs
else:
bugs = terms_requested[search_term]
if not bugs or not (bugs['open_recent'] or
bugs['all_others']):
# no suggestions, try to use
# the crash signature as search term
crash_signature = get_crash_signature(clean_line)
if crash_signature:
search_terms.append(crash_signature)
if crash_signature not in terms_requested:
bugs = get_bugs_for_search_term(
crash_signature,
bugscache_uri
)
terms_requested[crash_signature] = bugs
else:
bugs = terms_requested[crash_signature]
# TODO: Rename 'search' to 'error_text' or similar, since that's
# closer to what it actually represents (bug 1091060).
error_summary.append({
"search": clean_line,
"search_terms": search_terms,
"bugs": bugs
})
return error_summary
def get_mozharness_substring(line):
return MOZHARNESS_RE.sub('', line).strip()
def get_error_search_term(error_line):
"""
retrieves bug suggestions from bugscache using search_term
in a full_text search.
"""
if not error_line:
return None
# This is strongly inspired by
# https://hg.mozilla.org/webtools/tbpl/file/tip/php/inc/AnnotatedSummaryGenerator.php#l73
tokens = error_line.split(" | ")
search_term = None
if len(tokens) >= 3:
# it's in the "FAILURE-TYPE | testNameOrFilePath | message" type format.
test_name_or_path = tokens[1]
message = tokens[2]
# Leak failure messages are of the form:
# leakcheck | .*\d+ bytes leaked (Object-1, Object-2, Object-3, ...)
match = LEAK_RE.search(message)
if match:
search_term = match.group(1)
else:
for splitter in ("/", "\\"):
# if this is a path, we are interested in the last part
test_name_or_path = test_name_or_path.split(splitter)[-1]
search_term = test_name_or_path
# If the failure line was not in the pipe symbol delimited format or the search term
# will likely return too many (or irrelevant) results (eg: too short or matches terms
# on the blacklist), then we fall back to searching for the entire failure line if
# it is suitable.
if not (search_term and is_helpful_search_term(search_term)):
if is_helpful_search_term(error_line):
search_term = error_line
else:
search_term = None
# Searching for extremely long search terms is undesirable, since:
# a) Bugzilla's max summary length is 256 characters, and once "Intermittent "
# and platform/suite information is prefixed, there are even fewer characters
# left for us to use for the failure string against which we need to match.
# b) For long search terms, the additional length does little to prevent against
# false positives, but means we're more susceptible to false negatives due to
# run-to-run variances in the error messages (eg paths, process IDs).
if search_term:
search_term = search_term[:100]
return search_term
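# Illustrative example added for clarity (not part of the original module):
#   get_error_search_term(
#       "TEST-UNEXPECTED-FAIL | dom/tests/test_foo.html | assertion failed")
# splits on " | ", takes the basename of the second token and returns
# 'test_foo.html' as the search term.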
def get_crash_signature(error_line):
"""
Detect if the error_line contains a crash signature
and return it if it's a helpful search term
"""
search_term = None
match = CRASH_RE.match(error_line)
if match and is_helpful_search_term(match.group(1)):
search_term = match.group(1)
return search_term
def is_helpful_search_term(search_term):
# Search terms that will match too many bug summaries
# and so not result in useful suggestions.
search_term = search_term.strip()
blacklist = [
'automation.py',
'remoteautomation.py',
'Shutdown',
'undefined',
'Main app process exited normally',
'Traceback (most recent call last):',
'Return code: 0',
'Return code: 1',
'Return code: 2',
'Return code: 9',
'Return code: 10',
'Exiting 1',
'Exiting 9',
'CrashingThread(void *)',
'libSystem.B.dylib + 0xd7a',
'linux-gate.so + 0x424',
'TypeError: content is null',
'leakcheck'
]
return len(search_term) > 4 and not (search_term in blacklist)
def get_bugs_for_search_term(search, base_uri):
"""
Fetch the base_uri endpoint filtering on search and status.
Status must be either 'open' or 'closed'
"""
from treeherder.etl.common import fetch_json
params = {
'search': search
}
return fetch_json(base_uri, params=params)
def get_artifacts_that_need_bug_suggestions(artifact_list):
"""
Return a list of ``text_log_summary`` artifacts that don't have ``Bug suggestions``
"""
bs_guid_list = [x['job_guid'] for x in artifact_list if
x['name'] == 'Bug suggestions']
tls_list = [x for x in artifact_list if
x['name'] == 'text_log_summary' and
x['job_guid'] not in bs_guid_list]
return tls_list
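# Minimal usage sketch (job_guid values below are invented): only the summary
# whose job_guid has no matching "Bug suggestions" artifact is returned.
def _example_artifacts_needing_suggestions():
    artifacts = [
        {"job_guid": "abc123", "name": "text_log_summary", "blob": "{}"},
        {"job_guid": "abc123", "name": "Bug suggestions", "blob": "{}"},
        {"job_guid": "def456", "name": "text_log_summary", "blob": "{}"},
    ]
    # Returns a one-element list containing the "def456" text_log_summary.
    return get_artifacts_that_need_bug_suggestions(artifacts)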
def get_error_summary_artifacts(artifact_list):
"""
Create bug suggestions artifact(s) for any text_log_summary artifacts.
``artifact_list`` here is a list of artifacts that may contain one or more
``text_log_summary`` artifacts. If it does, we extract the error lines
from it. If there ARE error lines, then we generate the
``bug suggestions`` artifacts and return them.
"""
bug_suggestion_artifacts = []
for artifact in artifact_list:
# this is the only artifact name eligible to trigger generation of bug
# suggestions.
if artifact['name'] != 'text_log_summary':
continue
all_errors = get_all_errors(artifact)
bug_suggestion_artifacts.append({
"job_guid": artifact['job_guid'],
"name": 'Bug suggestions',
"type": 'json',
"blob": json.dumps(get_error_summary(all_errors))
})
return bug_suggestion_artifacts
def get_all_errors(artifact):
"""Extract the error lines from an artifact's blob field"""
artifact_blob = json.loads(artifact['blob'])
if isinstance(artifact_blob, dict):
return artifact_blob.get('step_data', {}).get('all_errors', [])
def load_error_summary(project, artifacts, job_id_lookup):
"""Load new bug suggestions artifacts if we generate them."""
from treeherder.model.derived import ArtifactsModel
bsa = get_error_summary_artifacts(artifacts)
if bsa:
with ArtifactsModel(project) as artifacts_model:
artifacts_model.load_job_artifacts(bsa, job_id_lookup)
| mpl-2.0 |
ambitioninc/kmatch | kmatch/tests/mixin_tests.py | 1 | 1208 | import unittest
from kmatch import KmatchTestMixin
class MixinTestUsingMixin(KmatchTestMixin, unittest.TestCase):
def test_matches(self):
"""
Test .assertMatches() using the mixin on a true match
"""
self.assertKmatches(['<=', 'f', 0], {'f': -1})
def test_matches_raises_error(self):
"""
Test .assertMatches() using the mixin on a false match
"""
with self.assertRaises(AssertionError):
self.assertKmatches(['<=', 'f', 0], {'f': 1})
def test_not_matches(self):
"""
Test .assertNotMatches() using the mixin on a false match
"""
self.assertNotKmatches(['<=', 'f', 0], {'f': 1})
def test_not_matches_no_key_error(self):
"""
Test .assertNotKmatches() using the mixin when the matched dict is missing the key
"""
self.assertNotKmatches(['<=', 'f', 0], {'g': 1})
self.assertNotKmatches(['<=', 'f', 0], {'f': 1})
def test_not_matches_raises_error(self):
"""
Test .assertNotMatches() using the mixin raises an error on a match
"""
with self.assertRaises(AssertionError):
self.assertNotKmatches(['<=', 'f', 0], {'f': -1})
| mit |
mrkm4ntr/incubator-airflow | tests/test_utils/hdfs_utils.py | 8 | 7310 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class FakeWebHDFSHook:
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient:
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False): # pylint: disable=invalid-name
"""
List paths the way the real snakebite client would.
:param path: a list of paths to test
:param include_toplevel: whether to return the top-level directory info
:return: a list of path descriptions for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/datafile',
}
]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [
{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory',
}
]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [
{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory',
},
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file',
},
]
elif path[0] == '/datadirectory/not_empty_directory':
return [
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file',
}
]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test1file',
},
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test2file',
},
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test3file',
},
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_',
},
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp',
},
]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook:
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
| apache-2.0 |
tima/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py | 16 | 13352 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork
version_added: "2.1"
short_description: Manage Azure virtual networks.
description:
- Create, update or delete a virtual network. Allows setting and updating the available IPv4 address ranges
and setting custom DNS servers. Use the azure_rm_subnet module to associate subnets with a virtual network.
options:
resource_group:
description:
- name of resource group.
required: true
address_prefixes_cidr:
description:
- List of IPv4 address ranges where each is formatted using CIDR notation. Required when creating
a new virtual network or using purge_address_prefixes.
aliases:
- address_prefixes
default: null
required: false
dns_servers:
description:
- Custom list of DNS servers. Maximum length of two. The first server in the list will be treated
as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the
specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to
default Azure servers.
default: null
required: false
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
name:
description:
- name of the virtual network.
required: true
purge_address_prefixes:
description:
- Use with state present to remove any existing address_prefixes.
default: false
purge_dns_servers:
description:
- Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually
exclusive with dns_servers.
default: false
required: false
state:
description:
- Assert the state of the virtual network. Use 'present' to create or update and
'absent' to delete.
default: present
choices:
- absent
- present
required: false
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create a virtual network
azure_rm_virtualnetwork:
name: foobar
resource_group: Testing
address_prefixes_cidr:
- "10.1.0.0/16"
- "172.100.0.0/16"
dns_servers:
- "127.0.0.1"
- "127.0.0.2"
tags:
testing: testing
delete: on-exit
- name: Delete a virtual network
azure_rm_virtualnetwork:
name: foobar
resource_group: Testing
state: absent
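# Additional illustrative example (not taken from the original module docs):
# replace the address space outright and drop any custom DNS servers.
- name: Replace address prefixes and revert to default Azure DNS
  azure_rm_virtualnetwork:
    name: foobar
    resource_group: Testing
    address_prefixes_cidr:
      - "10.1.0.0/16"
    purge_address_prefixes: true
    purge_dns_servers: true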
'''
RETURN = '''
state:
description: Current state of the virtual network.
returned: always
type: dict
sample: {
"address_prefixes": [
"10.1.0.0/16",
"172.100.0.0/16"
],
"dns_servers": [
"127.0.0.1",
"127.0.0.3"
],
"etag": 'W/"0712e87c-f02f-4bb3-8b9e-2da0390a3886"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network",
"location": "eastus",
"name": "my_test_network",
"provisioning_state": "Succeeded",
"tags": null,
"type": "Microsoft.Network/virtualNetworks"
}
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN
def virtual_network_to_dict(vnet):
'''
Convert a virtual network object to a dict.
:param vnet: VirtualNet object
:return: dict
'''
results = dict(
id=vnet.id,
name=vnet.name,
location=vnet.location,
type=vnet.type,
tags=vnet.tags,
provisioning_state=vnet.provisioning_state,
etag=vnet.etag
)
if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
results['dns_servers'] = []
for server in vnet.dhcp_options.dns_servers:
results['dns_servers'].append(server)
if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
results['address_prefixes'] = []
for space in vnet.address_space.address_prefixes:
results['address_prefixes'].append(space)
return results
class AzureRMVirtualNetwork(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),
dns_servers=dict(type='list',),
purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),
purge_dns_servers=dict(type='bool', default=False),
)
mutually_exclusive = [
('dns_servers', 'purge_dns_servers')
]
required_if = [
('purge_address_prefixes', True, ['address_prefixes_cidr'])
]
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.address_prefixes_cidr = None
self.purge_address_prefixes = None
self.dns_servers = None
self.purge_dns_servers = None
self.results = dict(
changed=False,
state=dict()
)
super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
self.results['check_mode'] = self.check_mode
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.state == 'present' and self.purge_address_prefixes:
for prefix in self.address_prefixes_cidr:
if not CIDR_PATTERN.match(prefix):
self.fail("Parameter error: invalid address prefix value {0}".format(prefix))
if self.dns_servers and len(self.dns_servers) > 2:
self.fail("Parameter error: You can provide a maximum of 2 DNS servers.")
changed = False
results = dict()
try:
self.log('Fetching vnet {0}'.format(self.name))
vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)
results = virtual_network_to_dict(vnet)
self.log('Vnet exists {0}'.format(self.name))
self.log(results, pretty_print=True)
self.check_provisioning_state(vnet, self.state)
if self.state == 'present':
if self.address_prefixes_cidr:
existing_address_prefix_set = set(vnet.address_space.address_prefixes)
requested_address_prefix_set = set(self.address_prefixes_cidr)
missing_prefixes = requested_address_prefix_set - existing_address_prefix_set
extra_prefixes = existing_address_prefix_set - requested_address_prefix_set
if len(missing_prefixes) > 0:
self.log('CHANGED: there are missing address_prefixes')
changed = True
if not self.purge_address_prefixes:
# add the missing prefixes
for prefix in missing_prefixes:
results['address_prefixes'].append(prefix)
if len(extra_prefixes) > 0 and self.purge_address_prefixes:
self.log('CHANGED: there are address_prefixes to purge')
changed = True
# replace existing address prefixes with requested set
results['address_prefixes'] = self.address_prefixes_cidr
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
if self.dns_servers:
existing_dns_set = set(vnet.dhcp_options.dns_servers)
requested_dns_set = set(self.dns_servers)
if existing_dns_set != requested_dns_set:
self.log('CHANGED: replacing DNS servers')
changed = True
results['dns_servers'] = self.dns_servers
if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
self.log('CHANGED: purging existing DNS servers')
changed = True
results['dns_servers'] = []
elif self.state == 'absent':
self.log("CHANGED: vnet exists but requested state is 'absent'")
changed = True
except CloudError:
self.log('Vnet {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: vnet {0} does not exist but requested state is 'present'".format(self.name))
changed = True
self.results['changed'] = changed
self.results['state'] = results
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
if not results:
# create a new virtual network
self.log("Create virtual network {0}".format(self.name))
if not self.address_prefixes_cidr:
self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')
vnet = self.network_models.VirtualNetwork(
location=self.location,
address_space=self.network_models.AddressSpace(
address_prefixes=self.address_prefixes_cidr
)
)
if self.dns_servers:
vnet.dhcp_options = self.network_models.DhcpOptions(
dns_servers=self.dns_servers
)
if self.tags:
vnet.tags = self.tags
self.results['state'] = self.create_or_update_vnet(vnet)
else:
# update existing virtual network
self.log("Update virtual network {0}".format(self.name))
vnet = self.network_models.VirtualNetwork(
location=results['location'],
address_space=self.network_models.AddressSpace(
address_prefixes=results['address_prefixes']
),
tags=results['tags']
)
if results.get('dns_servers'):
vnet.dhcp_options = self.network_models.DhcpOptions(
dns_servers=results['dns_servers']
)
self.results['state'] = self.create_or_update_vnet(vnet)
elif self.state == 'absent':
self.delete_virtual_network()
self.results['state']['status'] = 'Deleted'
return self.results
def create_or_update_vnet(self, vnet):
try:
poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)
new_vnet = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc)))
return virtual_network_to_dict(new_vnet)
def delete_virtual_network(self):
try:
poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)
result = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting virtual network {0} - {1}".format(self.name, str(exc)))
return result
def main():
AzureRMVirtualNetwork()
if __name__ == '__main__':
main()
| gpl-3.0 |
fredrik-johansson/mpmath | mpmath/calculus/quadrature.py | 1 | 42371 | import math
from ..libmp.backend import xrange
class QuadratureRule(object):
"""
Quadrature rules are implemented using this class, in order to
simplify the code and provide a common infrastructure
for tasks such as error estimation and node caching.
You can implement a custom quadrature rule by subclassing
:class:`QuadratureRule` and implementing the appropriate
methods. The subclass can then be used by :func:`~mpmath.quad` by
passing it as the *method* argument.
:class:`QuadratureRule` instances are supposed to be singletons.
:class:`QuadratureRule` therefore implements instance caching
in :func:`~mpmath.__new__`.
"""
def __init__(self, ctx):
self.ctx = ctx
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def clear(self):
"""
Delete cached node data.
"""
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def calc_nodes(self, degree, prec, verbose=False):
r"""
Compute nodes for the standard interval `[-1, 1]`. Subclasses
should probably implement only this method, and use
:func:`~mpmath.get_nodes` method to retrieve the nodes.
"""
raise NotImplementedError
def get_nodes(self, a, b, degree, prec, verbose=False):
"""
Return nodes for given interval, degree and precision. The
nodes are retrieved from a cache if already computed;
otherwise they are computed by calling :func:`~mpmath.calc_nodes`
and are then cached.
Subclasses should probably not implement this method,
but just implement :func:`~mpmath.calc_nodes` for the actual
node computation.
"""
key = (a, b, degree, prec)
if key in self.transformed_cache:
return self.transformed_cache[key]
orig = self.ctx.prec
try:
self.ctx.prec = prec+20
# Get nodes on standard interval
if (degree, prec) in self.standard_cache:
nodes = self.standard_cache[degree, prec]
else:
nodes = self.calc_nodes(degree, prec, verbose)
self.standard_cache[degree, prec] = nodes
# Transform to general interval
nodes = self.transform_nodes(nodes, a, b, verbose)
if key in self.interval_count:
self.transformed_cache[key] = nodes
else:
self.interval_count[key] = True
finally:
self.ctx.prec = orig
return nodes
def transform_nodes(self, nodes, a, b, verbose=False):
r"""
Rescale standardized nodes (for `[-1, 1]`) to a general
interval `[a, b]`. For a finite interval, a simple linear
change of variables is used. Otherwise, the following
transformations are used:
.. math ::
\lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1)
\lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x}
\lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}}
"""
ctx = self.ctx
a = ctx.convert(a)
b = ctx.convert(b)
one = ctx.one
if (a, b) == (-one, one):
return nodes
half = ctx.mpf(0.5)
new_nodes = []
if ctx.isinf(a) or ctx.isinf(b):
if (a, b) == (ctx.ninf, ctx.inf):
p05 = -half
for x, w in nodes:
x2 = x*x
px1 = one-x2
spx1 = px1**p05
x = x*spx1
w *= spx1/px1
new_nodes.append((x, w))
elif a == ctx.ninf:
b1 = b+1
for x, w in nodes:
u = 2/(x+one)
x = b1-u
w *= half*u**2
new_nodes.append((x, w))
elif b == ctx.inf:
a1 = a-1
for x, w in nodes:
u = 2/(x+one)
x = a1+u
w *= half*u**2
new_nodes.append((x, w))
elif a == ctx.inf or b == ctx.ninf:
return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
else:
raise NotImplementedError
else:
# Simple linear change of variables
C = (b-a)/2
D = (b+a)/2
for x, w in nodes:
new_nodes.append((D+C*x, C*w))
return new_nodes
def guess_degree(self, prec):
"""
Given a desired precision `p` in bits, estimate the degree `m`
of the quadrature required to accomplish full accuracy for
typical integrals. By default, :func:`~mpmath.quad` will perform up
to `m` iterations. The value of `m` should be a slight
overestimate, so that "slightly bad" integrals can be dealt
with automatically using a few extra iterations. On the
other hand, it should not be too big, so :func:`~mpmath.quad` can
quit within a reasonable amount of time when it is given
an "unsolvable" integral.
The default formula used by :func:`~mpmath.guess_degree` is tuned
for both :class:`TanhSinh` and :class:`GaussLegendre`.
The output is roughly as follows:
+---------+---------+
| `p` | `m` |
+=========+=========+
| 50 | 6 |
+---------+---------+
| 100 | 7 |
+---------+---------+
| 500 | 10 |
+---------+---------+
| 3000 | 12 |
+---------+---------+
This formula is based purely on a limited amount of
experimentation and will sometimes be wrong.
"""
# Expected degree
# XXX: use mag
g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
# Reasonable "worst case"
g += 2
return g
def estimate_error(self, results, prec, epsilon):
r"""
Given results from integrations `[I_1, I_2, \ldots, I_k]` done
with a quadrature of rule of degree `1, 2, \ldots, k`, estimate
the error of `I_k`.
For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.
For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
that each degree increment roughly doubles the accuracy of
the quadrature rule (this is true for both :class:`TanhSinh`
and :class:`GaussLegendre`). The extrapolation formula is given
by Borwein, Bailey & Girgensohn. Although not very conservative,
this method seems to be very robust in practice.
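As a rough illustration (numbers chosen arbitrarily): if
`|I_k-I_{k-1}| \approx 10^{-20}` while `|I_k-I_{k-2}| \approx 10^{-10}`,
the accuracy is extrapolated to have doubled once more and the estimate
of `|I_{\infty}-I_k|` comes out at roughly `10^{-40}` (assuming the
working precision permits it).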
"""
if len(results) == 2:
return abs(results[0]-results[1])
try:
if results[-1] == results[-2] == results[-3]:
return self.ctx.zero
D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
except ValueError:
return epsilon
D3 = -prec
D4 = min(0, max(D1**2/D2, 2*D1, D3))
return self.ctx.mpf(10) ** int(D4)
def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
"""
Main integration function. Computes the 1D integral over
the interval specified by *points*. For each subinterval,
performs quadrature of degree from 1 up to *max_degree*
until :func:`~mpmath.estimate_error` signals convergence.
:func:`~mpmath.summation` transforms each subintegration to
the standard interval and then calls :func:`~mpmath.sum_next`.
"""
ctx = self.ctx
I = total_err = ctx.zero
for i in xrange(len(points)-1):
a, b = points[i], points[i+1]
if a == b:
continue
# XXX: we could use a single variable transformation,
# but this is not good in practice. We get better accuracy
# by having 0 as an endpoint.
if (a, b) == (ctx.ninf, ctx.inf):
_f = f
f = lambda x: _f(-x) + _f(x)
a, b = (ctx.zero, ctx.inf)
results = []
err = ctx.zero
for degree in xrange(1, max_degree+1):
nodes = self.get_nodes(a, b, degree, prec, verbose)
if verbose:
print("Integrating from %s to %s (degree %s of %s)" % \
(ctx.nstr(a), ctx.nstr(b), degree, max_degree))
result = self.sum_next(f, nodes, degree, prec, results, verbose)
results.append(result)
if degree > 1:
err = self.estimate_error(results, prec, epsilon)
if verbose:
print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result))
if err <= epsilon:
break
I += results[-1]
total_err += err
if total_err > epsilon:
if verbose:
print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err))
return I, total_err
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
r"""
Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
contains the `(w_k, x_k)` pairs.
:func:`~mpmath.summation` will supply the list *results* of
values computed by :func:`~mpmath.sum_next` at previous degrees, in
case the quadrature rule is able to reuse them.
"""
return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
class TanhSinh(QuadratureRule):
r"""
This class implements "tanh-sinh" or "doubly exponential"
quadrature. This quadrature rule is based on the Euler-Maclaurin
integral formula. By performing a change of variables involving
nested exponentials / hyperbolic functions (hence the name), the
derivatives at the endpoints vanish rapidly. Since the error term
in the Euler-Maclaurin formula depends on the derivatives at the
endpoints, a simple step sum becomes extremely accurate. In
practice, this means that doubling the number of evaluation
points roughly doubles the number of accurate digits.
Comparison to Gauss-Legendre:
* Initial computation of nodes is usually faster
* Handles endpoint singularities better
* Handles infinite integration intervals better
* Is slower for smooth integrands once nodes have been computed
The implementation of the tanh-sinh algorithm is based on the
description given in Borwein, Bailey & Girgensohn, "Experimentation
in Mathematics - Computational Paths to Discovery", A K Peters,
2003, pages 312-313. In the present implementation, a few
improvements have been made:
* A more efficient scheme is used to compute nodes (exploiting
recurrence for the exponential function)
* The nodes are computed successively instead of all at once
**References**
* [Bailey]_
* http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf
"""
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
"""
Step sum for tanh-sinh quadrature of degree `m`. We exploit the
fact that half of the abscissas at degree `m` are precisely the
abscissas from degree `m-1`. Thus reusing the result from
the previous level allows a 2x speedup.
"""
h = self.ctx.mpf(2)**(-degree)
# Abscissas overlap, so reusing saves half of the time
if previous:
S = previous[-1]/(h*2)
else:
S = self.ctx.zero
S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
return h*S
def calc_nodes(self, degree, prec, verbose=False):
r"""
The abscissas and weights for tanh-sinh quadrature of degree
`m` are given by
.. math::
x_k = \tanh(\pi/2 \sinh(t_k))
w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2
where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
list of nodes is actually infinite, but the weights die off so
rapidly that only a few are needed.
"""
ctx = self.ctx
nodes = []
extra = 20
ctx.prec += extra
tol = ctx.ldexp(1, -prec-10)
pi4 = ctx.pi/4
# For simplicity, we work in steps h = 1/2^n, with the first point
# offset so that we can reuse the sum from the previous degree
# We define degree 1 to include the "degree 0" steps, including
# the point x = 0. (It doesn't work well otherwise; not sure why.)
t0 = ctx.ldexp(1, -degree)
if degree == 1:
#nodes.append((mpf(0), pi4))
#nodes.append((-mpf(0), pi4))
nodes.append((ctx.zero, ctx.pi/2))
h = t0
else:
h = t0*2
# Since h is fixed, we can compute the next exponential
# by simply multiplying by exp(h)
expt0 = ctx.exp(t0)
a = pi4 * expt0
b = pi4 / expt0
udelta = ctx.exp(h)
urdelta = 1/udelta
for k in xrange(0, 20*2**degree+1):
# Reference implementation:
# t = t0 + k*h
# x = tanh(pi/2 * sinh(t))
# w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2
# Fast implementation. Note that c = exp(pi/2 * sinh(t))
c = ctx.exp(a-b)
d = 1/c
co = (c+d)/2
si = (c-d)/2
x = si / co
w = (a+b) / co**2
diff = abs(x-1)
if diff <= tol:
break
nodes.append((x, w))
nodes.append((-x, w))
a *= udelta
b *= urdelta
if verbose and k % 300 == 150:
# Note: the number displayed is rather arbitrary. Should
# figure out how to print something that looks more like a
# percentage
print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))
ctx.prec -= extra
return nodes
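# Minimal sketch (illustration only; the class above uses a faster exp(h)
# recurrence): computing tanh-sinh abscissas and weights directly from the
# formulas in the calc_nodes docstring.  `ctx` is assumed to be an mpmath
# context such as mpmath.mp, and the weights exclude the step length h,
# matching the convention expected by TanhSinh.sum_next.
def _naive_tanh_sinh_nodes(ctx, degree, count):
    h = ctx.mpf(2) ** (-degree)
    nodes = []
    for k in range(-count, count + 1):
        t = h * k
        sinh_t = ctx.sinh(t)
        x = ctx.tanh(ctx.pi / 2 * sinh_t)
        w = ctx.pi / 2 * ctx.cosh(t) / ctx.cosh(ctx.pi / 2 * sinh_t) ** 2
        nodes.append((x, w))
    return nodes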
class GaussLegendre(QuadratureRule):
r"""
This class implements Gauss-Legendre quadrature, which is
exceptionally efficient for polynomials and polynomial-like (i.e.
very smooth) integrands.
The abscissas and weights are given by roots and values of
Legendre polynomials, which are the orthogonal polynomials
on `[-1, 1]` with respect to the unit weight
(see :func:`~mpmath.legendre`).
In this implementation, we take the "degree" `m` of the quadrature
to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
Borwein, Bailey & Girgensohn). This way we get quadratic, rather
than linear, convergence as the degree is incremented.
Comparison to tanh-sinh quadrature:
* Is faster for smooth integrands once nodes have been computed
* Initial computation of nodes is usually slower
* Handles endpoint singularities worse
* Handles infinite integration intervals worse
"""
def calc_nodes(self, degree, prec, verbose=False):
r"""
Calculates the abscissas and weights for Gauss-Legendre
quadrature of degree of given degree (actually `3 \cdot 2^m`).
"""
ctx = self.ctx
# It is important that the epsilon is set lower than the
# "real" epsilon
epsilon = ctx.ldexp(1, -prec-8)
# Fairly high precision might be required for accurate
# evaluation of the roots
orig = ctx.prec
ctx.prec = int(prec*1.5)
if degree == 1:
x = ctx.sqrt(ctx.mpf(3)/5)
w = ctx.mpf(5)/9
nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
ctx.prec = orig
return nodes
nodes = []
n = 3*2**(degree-1)
upto = n//2 + 1
for j in xrange(1, upto):
# Asymptotic formula for the roots
r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
# Newton iteration
while 1:
t1, t2 = 1, 0
# Evaluates the Legendre polynomial using its defining
# recurrence relation
for j1 in xrange(1,n+1):
t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
t4 = n*(r*t1-t2)/(r**2-1)
a = t1/t4
r = r - a
if abs(a) < epsilon:
break
x = r
w = 2/((1-r**2)*t4**2)
if verbose and j % 30 == 15:
print("Computing nodes (%i of %i)" % (j, upto))
nodes.append((x, w))
nodes.append((-x, w))
ctx.prec = orig
return nodes
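# Quick sanity sketch (illustration only, assuming an mpmath context such as
# mpmath.mp): the degree-1 nodes form the classical 3-point Gauss-Legendre
# rule, which integrates x**4 over [-1, 1] exactly (the result is 2/5).
def _gauss_legendre_check(ctx):
    rule = GaussLegendre(ctx)
    nodes = rule.calc_nodes(1, ctx.prec)
    return ctx.fdot((w, x**4) for (x, w) in nodes)  # should be ~0.4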
class QuadratureMethods(object):
def __init__(ctx, *args, **kwargs):
ctx._gauss_legendre = GaussLegendre(ctx)
ctx._tanh_sinh = TanhSinh(ctx)
def quad(ctx, f, *points, **kwargs):
r"""
Computes a single, double or triple integral over a given
1D interval, 2D rectangle, or 3D cuboid. A basic example::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(sin, [0, pi])
2.0
A basic 2D integral::
>>> f = lambda x, y: cos(x+y/2)
>>> quad(f, [-pi/2, pi/2], [0, pi])
4.0
**Interval format**
The integration range for each dimension may be specified
using a list or tuple. Arguments are interpreted as follows:
``quad(f, [x1, x2])`` -- calculates
`\int_{x_1}^{x_2} f(x) \, dx`
``quad(f, [x1, x2], [y1, y2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
\, dz \, dy \, dx`
Endpoints may be finite or infinite. An interval descriptor
may also contain more than two points. In this
case, the integration is split into subintervals, between
each pair of consecutive points. This is useful for
dealing with mid-interval discontinuities, or integrating
over large intervals where the function is irregular or
oscillates.
**Options**
:func:`~mpmath.quad` recognizes the following keyword arguments:
*method*
Chooses integration algorithm (described below).
*error*
If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
integral and `e` is the estimated error.
*maxdegree*
Maximum degree of the quadrature rule to try before
quitting.
*verbose*
Print details about progress.
**Algorithms**
Mpmath presently implements two integration algorithms: tanh-sinh
quadrature and Gauss-Legendre quadrature. These can be selected
using *method='tanh-sinh'* or *method='gauss-legendre'* or by
passing the classes *method=TanhSinh*, *method=GaussLegendre*.
The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
as shortcuts.
Both algorithms have the property that doubling the number of
evaluation points roughly doubles the accuracy, so both are ideal
for high precision quadrature (hundreds or thousands of digits).
At high precision, computing the nodes and weights for the
integration can be expensive (more expensive than computing the
function values). To make repeated integrations fast, nodes
are automatically cached.
The advantages of the tanh-sinh algorithm are that it tends to
handle endpoint singularities well, and that the nodes are cheap
to compute on the first run. For these reasons, it is used by
:func:`~mpmath.quad` as the default algorithm.
Gauss-Legendre quadrature often requires fewer function
evaluations, and is therefore often faster for repeated use, but
the algorithm does not handle endpoint singularities as well and
the nodes are more expensive to compute. Gauss-Legendre quadrature
can be a better choice if the integrand is smooth and repeated
integrations are required (e.g. for multiple integrals).
See the documentation for :class:`TanhSinh` and
:class:`GaussLegendre` for additional details.
**Examples of 1D integrals**
Intervals may be infinite or half-infinite. The following two
examples evaluate the limits of the inverse tangent function
(`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
`\int_{\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
>>> mp.dps = 15
>>> quad(lambda x: 2/(x**2+1), [0, inf])
3.14159265358979
>>> quad(lambda x: exp(-x**2), [-inf, inf])**2
3.14159265358979
Integrals can typically be resolved to high precision.
The following computes 50 digits of `\pi` by integrating the
area of the half-circle defined by `x^2 + y^2 \le 1`,
`-1 \le x \le 1`, `y \ge 0`::
>>> mp.dps = 50
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
3.1415926535897932384626433832795028841971693993751
One can just as well compute 1000 digits (output truncated)::
>>> mp.dps = 1000
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
3.141592653589793238462643383279502884...216420199
Complex integrals are supported. The following computes
a residue at `z = 0` by integrating counterclockwise along the
diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
>>> mp.dps = 15
>>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
(0.0 + 6.28318530717959j)
**Examples of 2D and 3D integrals**
Here are several nice examples of analytically solvable
2D integrals (taken from MathWorld [1]) that can be evaluated
to high precision fairly rapidly by :func:`~mpmath.quad`::
>>> mp.dps = 30
>>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
>>> quad(f, [0, 1], [0, 1])
0.577215664901532860606512090082
>>> +euler
0.577215664901532860606512090082
>>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
>>> quad(f, [-1, 1], [-1, 1])
3.17343648530607134219175646705
>>> 4*log(2+sqrt(3))-2*pi/3
3.17343648530607134219175646705
>>> f = lambda x, y: 1/(1-x**2 * y**2)
>>> quad(f, [0, 1], [0, 1])
1.23370055013616982735431137498
>>> pi**2 / 8
1.23370055013616982735431137498
>>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
1.64493406684822643647241516665
>>> pi**2 / 6
1.64493406684822643647241516665
Multiple integrals may be done over infinite ranges::
>>> mp.dps = 15
>>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
0.367879441171442
>>> print(1/e)
0.367879441171442
For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
For example, we can replicate the earlier example of calculating
`\pi` by integrating over the unit circle, this time using double
quadrature to actually measure the area of the circle::
>>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
>>> quad(f, [-1, 1])
3.14159265358979
Here is a simple triple integral::
>>> mp.dps = 15
>>> f = lambda x,y,z: x*y/(1+z)
>>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
0.101366277027041
>>> (log(3)-log(2))/4
0.101366277027041
**Singularities**
Both tanh-sinh and Gauss-Legendre quadrature are designed to
integrate smooth (infinitely differentiable) functions. Neither
algorithm copes well with mid-interval singularities (such as
mid-interval discontinuities in `f(x)` or `f'(x)`).
The best solution is to split the integral into parts::
>>> mp.dps = 15
>>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
3.99900894176779
>>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
4.0
The tanh-sinh rule often works well for integrands having a
singularity at one or both endpoints::
>>> mp.dps = 15
>>> quad(log, [0, 1], method='tanh-sinh') # Good
-1.0
>>> quad(log, [0, 1], method='gauss-legendre') # Bad
-0.999932197413801
However, the result may still be inaccurate for some functions::
>>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
1.99999999946942
This problem is not due to the quadrature rule per se, but to
numerical amplification of errors in the nodes. The problem can be
circumvented by temporarily increasing the precision::
>>> mp.dps = 30
>>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
>>> mp.dps = 15
>>> +a
2.0
**Highly variable functions**
For functions that are smooth (in the sense of being infinitely
differentiable) but contain sharp mid-interval peaks or many
"bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
example, with default settings, :func:`~mpmath.quad` is able to integrate
`\sin(x)` accurately over an interval of length 100 but not over
length 1000::
>>> quad(sin, [0, 100]); 1-cos(100) # Good
0.137681127712316
0.137681127712316
>>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
-37.8587612408485
0.437620923709297
One solution is to break the integration into 10 intervals of
length 100::
>>> quad(sin, linspace(0, 1000, 10)) # Good
0.437620923709297
Another is to increase the degree of the quadrature::
>>> quad(sin, [0, 1000], maxdegree=10) # Also good
0.437620923709297
Whether splitting the interval or increasing the degree is
more efficient differs from case to case. Another example is the
function `1/(1+x^2)`, which has a sharp peak centered around
`x = 0`::
>>> f = lambda x: 1/(1+x**2)
>>> quad(f, [-100, 100]) # Bad
3.64804647105268
>>> quad(f, [-100, 100], maxdegree=10) # Good
3.12159332021646
>>> quad(f, [-100, 0, 100]) # Also good
3.12159332021646
**References**
1. http://mathworld.wolfram.com/DoubleIntegral.html
"""
rule = kwargs.get('method', 'tanh-sinh')
if type(rule) is str:
if rule == 'tanh-sinh':
rule = ctx._tanh_sinh
elif rule == 'gauss-legendre':
rule = ctx._gauss_legendre
else:
raise ValueError("unknown quadrature rule: %s" % rule)
else:
rule = rule(ctx)
verbose = kwargs.get('verbose')
dim = len(points)
orig = prec = ctx.prec
epsilon = ctx.eps/8
m = kwargs.get('maxdegree') or rule.guess_degree(prec)
points = [ctx._as_points(p) for p in points]
try:
ctx.prec += 20
if dim == 1:
v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
elif dim == 2:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: f(x,y), \
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
elif dim == 3:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: \
rule.summation(lambda z: f(x,y,z), \
points[2], prec, epsilon, m)[0],
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
else:
raise NotImplementedError("quadrature must have dim 1, 2 or 3")
finally:
ctx.prec = orig
if kwargs.get("error"):
return +v, err
return +v
def quadts(ctx, *args, **kwargs):
"""
Performs tanh-sinh quadrature. The call
quadts(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=TanhSinh)
For example, a single integral and a double integral:
quadts(lambda x: exp(cos(x)), [0, 1])
quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
See documentation for TanhSinh for algorithmic information about
tanh-sinh quadrature.
"""
kwargs['method'] = 'tanh-sinh'
return ctx.quad(*args, **kwargs)
def quadgl(ctx, *args, **kwargs):
"""
Performs Gauss-Legendre quadrature. The call
quadgl(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=GaussLegendre)
For example, a single integral and a double integral:
quadgl(lambda x: exp(cos(x)), [0, 1])
quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
See documentation for GaussLegendre for algorithmic information about
Gauss-Legendre quadrature.
"""
kwargs['method'] = 'gauss-legendre'
return ctx.quad(*args, **kwargs)
def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
r"""
Calculates
.. math ::
I = \int_a^b f(x) dx
where at least one of `a` and `b` is infinite and where
`f(x) = g(x) \cos(\omega x + \phi)` for some slowly
decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
can also handle oscillatory integrals where the oscillation
rate is different from a pure sine or cosine wave.
In the standard case when `|a| < \infty, b = \infty`,
:func:`~mpmath.quadosc` works by evaluating the infinite series
.. math ::
I = \int_a^{x_1} f(x) dx +
\sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
where `x_k` are consecutive zeros (alternatively
some other periodic reference point) of `f(x)`.
Accordingly, :func:`~mpmath.quadosc` requires information about the
zeros of `f(x)`. For a periodic function, you can specify
the zeros by either providing the angular frequency `\omega`
(*omega*) or the *period* `2 \pi/\omega`. In general, you can
specify the `n`-th zero by providing the *zeros* arguments.
Below is an example of each::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> f = lambda x: sin(3*x)/(x**2+1)
>>> quadosc(f, [0,inf], omega=3)
0.37833007080198
>>> quadosc(f, [0,inf], period=2*pi/3)
0.37833007080198
>>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
0.37833007080198
>>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
0.37833007080198
Note that *zeros* was specified to multiply `n` by the
*half-period*, not the full period. In theory, it does not matter
whether each partial integral is done over a half period or a full
period. However, if done over half-periods, the infinite series
passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
typically makes the extrapolation much more efficient.
Here is an example of an integration over the entire real line,
and a half-infinite integration starting at `-\infty`::
>>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
1.15572734979092
>>> pi/e
1.15572734979092
>>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
-0.0844109505595739
>>> cos(1)+si(1)-pi/2
-0.0844109505595738
Of course, the integrand may contain a complex exponential just as
well as a real sine or cosine::
>>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
(0.156410688228254 + 0.0j)
>>> pi/e**3
0.156410688228254
>>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
(0.00317486988463794 - 0.0447701735209082j)
>>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
(0.00317486988463794 - 0.0447701735209082j)
**Non-periodic functions**
If `f(x) = g(x) h(x)` for some function `h(x)` that is not
strictly periodic, *omega* or *period* might not work, and it might
be necessary to use *zeros*.
A notable exception can be made for Bessel functions which, though not
periodic, are "asymptotically periodic" in a sufficiently strong sense
that the sum extrapolation will work out::
>>> quadosc(j0, [0, inf], period=2*pi)
1.0
>>> quadosc(j1, [0, inf], period=2*pi)
1.0
More properly, one should provide the exact Bessel function zeros::
>>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
>>> quadosc(j0, [0, inf], zeros=j0zero)
1.0
For an example where *zeros* becomes necessary, consider the
complete Fresnel integrals
.. math ::
\int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
= \sqrt{\frac{\pi}{8}}.
Although the integrands do not decrease in magnitude as
`x \to \infty`, the integrals are convergent since the oscillation
rate increases (causing consecutive periods to asymptotically
cancel out). These integrals are virtually impossible to calculate
to any kind of accuracy using standard quadrature rules. However,
if one provides the correct asymptotic distribution of zeros
(`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
>>> mp.dps = 30
>>> f = lambda x: cos(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> f = lambda x: sin(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> sqrt(pi/8)
0.626657068657750125603941321203
(Interestingly, these integrals can still be evaluated if one
places some other constant than `\pi` in the square root sign.)
In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
the inverse-function distribution `h^{-1}(x)`::
>>> mp.dps = 15
>>> f = lambda x: sin(exp(x))
>>> quadosc(f, [1,inf], zeros=lambda n: log(n))
-0.25024394235267
>>> pi/2-si(e)
-0.250243942352671
**Non-alternating functions**
If the integrand oscillates around a positive value, without
alternating signs, the extrapolation might fail. A simple trick
that sometimes works is to multiply or divide the frequency by 2::
>>> f = lambda x: 1/x**2+sin(x)/x**4
>>> quadosc(f, [1,inf], omega=1) # Bad
1.28642190869861
>>> quadosc(f, [1,inf], omega=0.5) # Perfect
1.28652953559617
>>> 1+(cos(1)+ci(1)+sin(1))/6
1.28652953559617
**Fast decay**
:func:`~mpmath.quadosc` is primarily useful for slowly decaying
integrands. If the integrand decreases exponentially or faster,
:func:`~mpmath.quad` will likely handle it without trouble (and generally be
much faster than :func:`~mpmath.quadosc`)::
>>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
0.5
>>> quad(lambda x: cos(x)/exp(x), [0, inf])
0.5
"""
a, b = ctx._as_points(interval)
a = ctx.convert(a)
b = ctx.convert(b)
if [omega, period, zeros].count(None) != 2:
raise ValueError( \
"must specify exactly one of omega, period, zeros")
if a == ctx.ninf and b == ctx.inf:
s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
return s1 + s2
if a == ctx.ninf:
if zeros:
return ctx.quadosc(lambda x:f(-x), [-b,-a], lambda n: zeros(-n))
else:
return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
if b != ctx.inf:
raise ValueError("quadosc requires an infinite integration interval")
if not zeros:
if omega:
period = 2*ctx.pi/omega
zeros = lambda n: n*period/2
#for n in range(1,10):
# p = zeros(n)
# if p > a:
# break
#if n >= 9:
# raise ValueError("zeros do not appear to be correctly indexed")
n = 1
s = ctx.quadgl(f, [a, zeros(n)])
def term(k):
return ctx.quadgl(f, [zeros(k), zeros(k+1)])
s += ctx.nsum(term, [n, ctx.inf])
return s
def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs):
"""
Computes the integral of *f* over the interval or path specified
by *interval*, using :func:`~mpmath.quad` together with adaptive
subdivision of the interval.
This function gives an accurate answer for some integrals where
:func:`~mpmath.quad` fails::
>>> mp.dps = 15; mp.pretty = True
>>> quad(lambda x: abs(sin(x)), [0, 2*pi])
3.99900894176779
>>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi])
4.0
>>> quadsubdiv(sin, [0, 1000])
0.437620923709297
>>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100])
3.12159332021646
>>> quadsubdiv(lambda x: ceil(x), [0, 100])
5050.0
>>> quadsubdiv(lambda x: sin(x+exp(x)), [0,8])
0.347400172657248
The argument *maxintervals* can be set to limit the permissible
subdivision::
>>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=5, error=True)
(-5.40487904307774, 5.011)
>>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=100, error=True)
(0.631417921866934, 1.10101120134116e-17)
Subdivision does not guarantee a correct answer, since the error
estimate on subintervals may be inaccurate::
>>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
(0.209736068833883, 1.00011000000001e-18)
>>> mp.dps = 20
>>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
(0.21080273550054927738, 2.200000001e-24)
The second answer is correct. We can get an accurate result at lower
precision by forcing a finer initial subdivision::
>>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, linspace(0,1,5))
0.210802735500549
The following integral is too oscillatory for convergence, but we can get a
reasonable estimate::
>>> v, err = fp.quadsubdiv(lambda x: fp.sin(1/x), [0,1], error=True)
>>> round(v, 6), round(err, 6)
(0.504067, 1e-06)
>>> sin(1) - ci(1)
0.504067061906928
"""
queue = []
for i in range(len(interval)-1):
queue.append((interval[i], interval[i+1]))
total = ctx.zero
total_error = ctx.zero
if maxintervals is None:
maxintervals = 10 * ctx.prec
count = 0
quad_args = kwargs.copy()
quad_args["verbose"] = False
quad_args["error"] = True
if tol is None:
tol = +ctx.eps
orig = ctx.prec
try:
ctx.prec += 5
while queue:
a, b = queue.pop()
s, err = ctx.quad(f, [a, b], **quad_args)
if kwargs.get("verbose"):
print("subinterval", count, a, b, err)
if err < tol or count > maxintervals:
total += s
total_error += err
else:
count += 1
if count == maxintervals and kwargs.get("verbose"):
print("warning: number of intervals exceeded maxintervals")
if a == -ctx.inf and b == ctx.inf:
m = 0
elif a == -ctx.inf:
m = min(b-1, 2*b)
elif b == ctx.inf:
m = max(a+1, 2*a)
else:
m = a + (b - a) / 2
queue.append((a, m))
queue.append((m, b))
finally:
ctx.prec = orig
if kwargs.get("error"):
return +total, +total_error
else:
return +total
if __name__ == '__main__':
import doctest
doctest.testmod()
| bsd-3-clause |
Endika/OpenUpgrade | addons/l10n_fr/report/compute_resultant_report.py | 374 | 4004 | # -*- coding: utf-8 -*-
#
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import base_report
from openerp.osv import osv
class cdr(base_report.base_report):
def __init__(self, cr, uid, name, context):
super(cdr, self).__init__(cr, uid, name, context)
def set_context(self, objects, data, ids):
super(cdr, self).set_context(objects, data, ids)
self._load('cdr', self.localcontext['data']['form'])
self._set_variable(
'ct1',
self.localcontext['cdrc1']+self.localcontext['cdrc2']+self.localcontext['cdrc3']+
self.localcontext['cdrc4']+self.localcontext['cdrc5']+self.localcontext['cdrc6']+
self.localcontext['cdrc7']+self.localcontext['cdrc8']+self.localcontext['cdrc9']+
self.localcontext['cdrc10']+self.localcontext['cdrc11']+self.localcontext['cdrc12']+
self.localcontext['cdrc13']+self.localcontext['cdrc14']+self.localcontext['cdrc15']
)
self._set_variable(
'ct3',
self.localcontext['cdrc17']+self.localcontext['cdrc18']+self.localcontext['cdrc19']+
self.localcontext['cdrc20']
)
self._set_variable(
'ct4',
self.localcontext['cdrc21']+self.localcontext['cdrc22']+self.localcontext['cdrc23']
)
self._set_variable(
'charges',
self.localcontext['ct1']+self.localcontext['cdrc16']+self.localcontext['ct3']+
self.localcontext['ct4']+self.localcontext['cdrc24']+self.localcontext['cdrc25']
)
self._set_variable(
'pta',
self.localcontext['cdrp1']+self.localcontext['cdrp2']
)
self._set_variable(
'ptb',
self.localcontext['cdrp3']+self.localcontext['cdrp4']+self.localcontext['cdrp5']+
self.localcontext['cdrp6']+self.localcontext['cdrp7']
)
self._set_variable(
'pt1',
self.localcontext['pta']+self.localcontext['ptb']
)
self._set_variable(
'pt3',
self.localcontext['cdrp9']+self.localcontext['cdrp10']+self.localcontext['cdrp11']+
self.localcontext['cdrp12']+self.localcontext['cdrp13']+self.localcontext['cdrp14']
)
self._set_variable(
'pt4',
self.localcontext['cdrp15']+self.localcontext['cdrp16']+self.localcontext['cdrp17']
)
self._set_variable(
'produits',
self.localcontext['pt1']+self.localcontext['cdrp8']+self.localcontext['pt3']+
self.localcontext['pt4']
)
class wrapped_report_resultat(osv.AbstractModel):
_name = 'report.l10n_fr.report_l10nfrresultat'
_inherit = 'report.abstract_report'
_template = 'l10n_fr.report_l10nfrresultat'
_wrapped_report_class = cdr
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
justathoughtor2/atomicApe | cygwin/lib/python2.7/site-packages/pylint/checkers/newstyle.py | 3 | 6974 | # Copyright (c) 2005-2014 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""check for new / old style related problems
"""
import sys
import astroid
from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
check_messages,
node_frame_class,
has_known_bases
)
MSGS = {
'E1001': ('Use of __slots__ on an old style class',
'slots-on-old-class',
'Used when an old style class uses the __slots__ attribute.',
{'maxversion': (3, 0)}),
'E1002': ('Use of super on an old style class',
'super-on-old-class',
'Used when an old style class uses the super builtin.',
{'maxversion': (3, 0)}),
'E1003': ('Bad first argument %r given to super()',
'bad-super-call',
'Used when another argument than the current class is given as \
first argument of the super builtin.'),
'E1004': ('Missing argument to super()',
'missing-super-argument',
'Used when the super builtin didn\'t receive an \
argument.',
{'maxversion': (3, 0)}),
'W1001': ('Use of "property" on an old style class',
'property-on-old-class',
'Used when Pylint detect the use of the builtin "property" \
on an old style class while this is relying on new style \
classes features.',
{'maxversion': (3, 0)}),
'C1001': ('Old-style class defined.',
'old-style-class',
'Used when a class is defined that does not inherit from another'
'class and does not inherit explicitly from "object".',
{'maxversion': (3, 0)})
}
class NewStyleConflictChecker(BaseChecker):
"""checks for usage of new style capabilities on old style classes and
other new/old styles conflicts problems
* use of property, __slots__, super
* "super" usage
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = 'newstyle'
# messages
msgs = MSGS
priority = -2
# configuration options
options = ()
@check_messages('slots-on-old-class', 'old-style-class')
def visit_classdef(self, node):
""" Check __slots__ in old style classes and old
style class definition.
"""
if '__slots__' in node and not node.newstyle:
confidence = (INFERENCE if has_known_bases(node)
else INFERENCE_FAILURE)
self.add_message('slots-on-old-class', node=node,
confidence=confidence)
# The node type could be class, exception, metaclass, or
# interface. Presumably, the non-class-type nodes would always
# have an explicit base class anyway.
if not node.bases and node.type == 'class' and not node.metaclass():
# We use confidence HIGH here because this message should only ever
            # be emitted for classes at the root of the inheritance hierarchy.
self.add_message('old-style-class', node=node, confidence=HIGH)
@check_messages('property-on-old-class')
def visit_call(self, node):
"""check property usage"""
parent = node.parent.frame()
if (isinstance(parent, astroid.ClassDef) and
not parent.newstyle and
isinstance(node.func, astroid.Name)):
confidence = (INFERENCE if has_known_bases(parent)
else INFERENCE_FAILURE)
name = node.func.name
if name == 'property':
self.add_message('property-on-old-class', node=node,
confidence=confidence)
@check_messages('super-on-old-class', 'bad-super-call', 'missing-super-argument')
def visit_functiondef(self, node):
"""check use of super"""
# ignore actual functions or method within a new style class
if not node.is_method():
return
klass = node.parent.frame()
for stmt in node.nodes_of_class(astroid.Call):
if node_frame_class(stmt) != node_frame_class(node):
# Don't look down in other scopes.
continue
expr = stmt.func
if not isinstance(expr, astroid.Attribute):
continue
call = expr.expr
# skip the test if using super
if not (isinstance(call, astroid.Call) and
isinstance(call.func, astroid.Name) and
call.func.name == 'super'):
continue
if not klass.newstyle and has_known_bases(klass):
# super should not be used on an old style class
self.add_message('super-on-old-class', node=node)
else:
# super first arg should be the class
if not call.args and sys.version_info[0] == 3:
# unless Python 3
continue
try:
supcls = (call.args and next(call.args[0].infer())
or None)
except astroid.InferenceError:
continue
if supcls is None:
self.add_message('missing-super-argument', node=call)
continue
if klass is not supcls:
name = None
                    # if supcls is not YES, then supcls was inferred
# and use its name. Otherwise, try to look
# for call.args[0].name
if supcls is not astroid.YES:
name = supcls.name
else:
if hasattr(call.args[0], 'name'):
name = call.args[0].name
if name is not None:
self.add_message('bad-super-call', node=call, args=(name, ))
visit_asyncfunctiondef = visit_functiondef
def register(linter):
"""required method to auto register this checker """
linter.register_checker(NewStyleConflictChecker(linter))
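# Illustrative sketch (not part of pylint itself; the class and method names
# below are hypothetical): under Python 2 this checker flags code such as
#
#     class Legacy:                                   # C1001 old-style-class
#         __slots__ = ('x',)                          # E1001 slots-on-old-class
#
#         def clone(self):
#             return super(Legacy, self).clone()      # E1002 super-on-old-class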
| gpl-3.0 |
piffey/ansible | lib/ansible/modules/cloud/amazon/aws_ses_identity_policy.py | 78 | 7303 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_ses_identity_policy
short_description: Manages SES sending authorization policies
description:
- This module allows the user to manage sending authorization policies associated with an SES identity (email or domain).
- SES authorization sending policies can be used to control what actors are able to send email
on behalf of the validated identity and what conditions must be met by the sent emails.
version_added: "2.6"
author: Ed Costello (@orthanc)
options:
identity:
description: |
The SES identity to attach or remove a policy from. This can be either the full ARN or just
the verified email or domain.
required: true
policy_name:
description: The name used to identify the policy within the scope of the identity it's attached to.
required: true
policy:
        description: A properly formatted JSON sending authorization policy. Required when I(state=present).
state:
        description: Whether to create (or update) or delete the authorization policy on the identity.
default: present
choices: [ 'present', 'absent' ]
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: add sending authorization policy to domain identity
aws_ses_identity_policy:
identity: example.com
policy_name: ExamplePolicy
policy: "{{ lookup('template', 'policy.json.j2') }}"
state: present
- name: add sending authorization policy to email identity
aws_ses_identity_policy:
identity: [email protected]
policy_name: ExamplePolicy
policy: "{{ lookup('template', 'policy.json.j2') }}"
state: present
- name: add sending authorization policy to identity using ARN
aws_ses_identity_policy:
identity: "arn:aws:ses:us-east-1:12345678:identity/example.com"
policy_name: ExamplePolicy
policy: "{{ lookup('template', 'policy.json.j2') }}"
state: present
- name: remove sending authorization policy
aws_ses_identity_policy:
identity: example.com
policy_name: ExamplePolicy
state: absent
'''
RETURN = '''
policies:
description: A list of all policies present on the identity after the operation.
returned: success
type: list
sample: [ExamplePolicy]
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import compare_policies, AWSRetry
import json
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
def get_identity_policy(connection, module, identity, policy_name):
try:
response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name))
policies = response['Policies']
if policy_name in policies:
return policies[policy_name]
return None
def create_or_update_identity_policy(connection, module):
identity = module.params.get('identity')
policy_name = module.params.get('policy_name')
required_policy = module.params.get('policy')
required_policy_dict = json.loads(required_policy)
changed = False
policy = get_identity_policy(connection, module, identity, policy_name)
policy_dict = json.loads(policy) if policy else None
if compare_policies(policy_dict, required_policy_dict):
changed = True
try:
if not module.check_mode:
connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name))
# Load the list of applied policies to include in the response.
# In principle we should be able to just return the response, but given
# eventual consistency behaviours in AWS it's plausible that we could
# end up with a list that doesn't contain the policy we just added.
# So out of paranoia check for this case and if we're missing the policy
# just make sure it's present.
#
# As a nice side benefit this also means the return is correct in check mode
try:
policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to list identity policies')
if policy_name is not None and policy_name not in policies_present:
policies_present = list(policies_present)
policies_present.append(policy_name)
module.exit_json(
changed=changed,
policies=policies_present,
)
def delete_identity_policy(connection, module):
identity = module.params.get('identity')
policy_name = module.params.get('policy_name')
changed = False
try:
policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to list identity policies')
if policy_name in policies_present:
try:
if not module.check_mode:
connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name))
changed = True
policies_present = list(policies_present)
policies_present.remove(policy_name)
module.exit_json(
changed=changed,
policies=policies_present,
)
def main():
module = AnsibleAWSModule(
argument_spec={
'identity': dict(required=True, type='str'),
'state': dict(default='present', choices=['present', 'absent']),
'policy_name': dict(required=True, type='str'),
'policy': dict(type='json', default=None),
},
required_if=[['state', 'present', ['policy']]],
supports_check_mode=True,
)
# SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
# Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the Ansible CI builds run multiple instances of the tests in parallel, which has caused
    # throttling failures, so apply a jittered backoff to SES calls.
connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
state = module.params.get("state")
if state == 'present':
create_or_update_identity_policy(connection, module)
else:
delete_identity_policy(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
oktie/linkedct | ctdjango/chardet/universaldetector.py | 190 | 6635 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from latin1prober import Latin1Prober # windows-1252
from mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from sbcsgroupprober import SBCSGroupProber # single-byte character sets
from escprober import EscCharSetProber # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(r'[\x80-\xFF]')
self._escDetector = re.compile(r'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = constants.False
self._mStart = constants.True
self._mGotData = constants.False
self._mInputState = ePureAscii
self._mLastChar = ''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done: return
aLen = len(aBuf)
if not aLen: return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == '\xEF\xBB\xBF':
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
elif aBuf[:4] == '\xFF\xFE\x00\x00':
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == '\x00\x00\xFE\xFF':
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == '\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {'encoding': "X-ISO-10646-UCS-4-3412", 'confidence': 1.0}
elif aBuf[:4] == '\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {'encoding': "X-ISO-10646-UCS-4-2143", 'confidence': 1.0}
elif aBuf[:2] == '\xFF\xFE':
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == '\xFE\xFF':
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = constants.True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = constants.True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif (self._mInputState == ePureAscii) and self._escDetector.search(self._mLastChar + aBuf):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = constants.True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(), Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = constants.True
break
def close(self):
if self.done: return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = constants.True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober: continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober: continue
sys.stderr.write('%s confidence = %s\n' % \
(prober.get_charset_name(), \
prober.get_confidence()))
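# A minimal usage sketch (the file name below is a placeholder): feed the
# detector incrementally, stop once it is confident, then read the result
# after close().
#
#     detector = UniversalDetector()
#     for line in open('unknown.bin', 'rb'):
#         detector.feed(line)
#         if detector.done:
#             break
#     detector.close()
#     print detector.result   # e.g. {'encoding': 'UTF-8', 'confidence': 0.99}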
| apache-2.0 |
OBIGOGIT/etch | binding-python/runtime/src/test/python/tests/binding/support/TestValidator_string.py | 6 | 1079 | # Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
import unittest
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Observer-Wu/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py | 121 | 3180 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.config import urls
from webkitpy.tool.grammar import join_with_separators
from webkitpy.tool.steps.abstractstep import AbstractStep
class PrepareChangeLogForRevert(AbstractStep):
@classmethod
def _message_for_revert(cls, revision_list, reason, bug_url=None):
message = "Unreviewed, rolling out %s.\n" % join_with_separators(['r' + str(revision) for revision in revision_list])
for revision in revision_list:
message += "%s\n" % urls.view_revision_url(revision)
if bug_url:
message += "%s\n" % bug_url
# Add an extra new line after the rollout links, before any reason.
message += "\n"
if reason:
message += "%s\n\n" % reason
return message
def run(self, state):
# This could move to prepare-ChangeLog by adding a --revert= option.
self._tool.executive.run_and_throw_if_fail(self._tool.deprecated_port().prepare_changelog_command(), cwd=self._tool.scm().checkout_root)
changelog_paths = self._tool.checkout().modified_changelogs(git_commit=None)
bug_url = self._tool.bugs.bug_url_for_bug_id(state["bug_id"]) if state["bug_id"] else None
message = self._message_for_revert(state["revision_list"], state["reason"], bug_url)
for changelog_path in changelog_paths:
# FIXME: Seems we should prepare the message outside of changelogs.py and then just pass in
# text that we want to use to replace the reviewed by line.
ChangeLog(changelog_path).update_with_unreviewed_message(message)
| bsd-3-clause |
skarra/CalDAVClientLibrary | caldavclientlibrary/client/account.py | 1 | 1580 | ##
# Copyright (c) 2006-2013 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.client.clientsession import CalDAVSession
from caldavclientlibrary.client.principal import principalCache
class CalDAVAccount(object):
def __init__(self, server, port=None, ssl=False, user="", pswd="", principal=None, root=None, logging=False):
self.session = CalDAVSession(server, port, ssl, user, pswd, principal, root, logging)
self.principal = principalCache.getPrincipal(self.session, self.session.principalPath)
def setUserPswd(self, user, pswd):
self.session.setUserPswd(user, pswd)
self.principal = principalCache.getPrincipal(self.session, self.session.principalPath)
def getPrincipal(self, path=None, refresh=False):
if path:
return principalCache.getPrincipal(self.session, path, refresh=refresh)
elif refresh:
self.principal = principalCache.getPrincipal(self.session, self.session.principalPath, refresh=refresh)
return self.principal
| apache-2.0 |
pierreg/tensorflow | tensorflow/python/kernel_tests/summary_ops_test.py | 10 | 3656 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class SummaryOpsTest(tf.test.TestCase):
def _AsSummary(self, s):
summ = tf.Summary()
summ.ParseFromString(s)
return summ
def testScalarSummary(self):
with self.test_session() as sess:
const = tf.constant([10.0, 20.0])
summ = tf.scalar_summary(["c1", "c2"], const, name="mysumm")
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
value { tag: "c1" simple_value: 10.0 }
value { tag: "c2" simple_value: 20.0 }
""", self._AsSummary(value))
def testScalarSummaryDefaultName(self):
with self.test_session() as sess:
const = tf.constant([10.0, 20.0])
summ = tf.scalar_summary(["c1", "c2"], const)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
value { tag: "c1" simple_value: 10.0 }
value { tag: "c2" simple_value: 20.0 }
""", self._AsSummary(value))
def testMergeSummary(self):
with self.test_session() as sess:
const = tf.constant(10.0)
summ1 = tf.summary.histogram("h", const)
summ2 = tf.scalar_summary("c", const)
merge = tf.summary.merge([summ1, summ2])
value = sess.run(merge)
self.assertEqual([], merge.get_shape())
self.assertProtoEquals("""
value {
tag: "h"
histo {
min: 10.0
max: 10.0
num: 1.0
sum: 10.0
sum_squares: 100.0
bucket_limit: 9.93809490288
bucket_limit: 10.9319043932
bucket_limit: 1.7976931348623157e+308
bucket: 0.0
bucket: 1.0
bucket: 0.0
}
}
value { tag: "c" simple_value: 10.0 }
""", self._AsSummary(value))
def testMergeAllSummaries(self):
with tf.Graph().as_default():
const = tf.constant(10.0)
summ1 = tf.summary.histogram("h", const)
summ2 = tf.summary.scalar("o", const, collections=["foo_key"])
summ3 = tf.summary.scalar("c", const)
merge = tf.summary.merge_all()
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(2, len(merge.op.inputs))
self.assertEqual(summ1, merge.op.inputs[0])
self.assertEqual(summ3, merge.op.inputs[1])
merge = tf.merge_all_summaries("foo_key")
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(1, len(merge.op.inputs))
self.assertEqual(summ2, merge.op.inputs[0])
self.assertTrue(tf.merge_all_summaries("bar_key") is None)
def testHistogramSummaryTypes(self):
with tf.Graph().as_default():
for dtype in (tf.int8, tf.uint8, tf.int16, tf.int32,
tf.float32, tf.float64):
const = tf.constant(10, dtype=dtype)
tf.summary.histogram("h", const)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
marc-sensenich/ansible | lib/ansible/modules/database/influxdb/influxdb_user.py | 15 | 5857 | #!/usr/bin/python
# (c) 2017, Vitaliy Zhhuta <zhhuta () gmail.com>
# inspired by Kamil Szczygiel <kamil.szczygiel () intel.com> influxdb_database module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: influxdb_user
short_description: Manage InfluxDB users
description:
- Manage InfluxDB users
version_added: 2.5
author: "Vitaliy Zhhuta (@zhhuta)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
user_name:
description:
- Name of the user.
required: True
user_password:
description:
- Password to be set for the user.
required: false
admin:
description:
- Whether the user should be in the admin role or not.
- Since version 2.8, the role will also be updated.
default: no
type: bool
state:
description:
- State of the user.
choices: [ present, absent ]
default: present
extends_documentation_fragment: influxdb
'''
EXAMPLES = '''
- name: Create a user on localhost using default login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
- name: Create a user on localhost using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Create an admin user on a remote host using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
admin: yes
hostname: "{{ influxdb_hostname }}"
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Destroy a user using custom login credentials
influxdb_user:
user_name: john
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
state: absent
'''
RETURN = '''
#only defaults
'''
import ansible.module_utils.urls
from ansible.module_utils.basic import AnsibleModule
import ansible.module_utils.influxdb as influx
def find_user(module, client, user_name):
user_result = None
try:
users = client.get_list_users()
for user in users:
if user['user'] == user_name:
user_result = user
break
except (ansible.module_utils.urls.ConnectionError, influx.exceptions.InfluxDBClientError) as e:
module.fail_json(msg=str(e))
return user_result
def check_user_password(module, client, user_name, user_password):
try:
client.switch_user(user_name, user_password)
client.get_list_users()
except influx.exceptions.InfluxDBClientError as e:
if e.code == 401:
return False
except ansible.module_utils.urls.ConnectionError as e:
module.fail_json(msg=str(e))
finally:
# restore previous user
client.switch_user(module.params['username'], module.params['password'])
return True
def set_user_password(module, client, user_name, user_password):
if not module.check_mode:
try:
client.set_user_password(user_name, user_password)
except ansible.module_utils.urls.ConnectionError as e:
module.fail_json(msg=str(e))
def create_user(module, client, user_name, user_password, admin):
if not module.check_mode:
try:
client.create_user(user_name, user_password, admin)
except ansible.module_utils.urls.ConnectionError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=True)
def drop_user(module, client, user_name):
if not module.check_mode:
try:
client.drop_user(user_name)
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
module.exit_json(changed=True)
def main():
argument_spec = influx.InfluxDb.influxdb_argument_spec()
argument_spec.update(
state=dict(default='present', type='str', choices=['present', 'absent']),
user_name=dict(required=True, type='str'),
user_password=dict(required=False, type='str', no_log=True),
admin=dict(default='False', type='bool')
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
state = module.params['state']
user_name = module.params['user_name']
user_password = module.params['user_password']
admin = module.params['admin']
influxdb = influx.InfluxDb(module)
client = influxdb.connect_to_influxdb()
user = find_user(module, client, user_name)
if state == 'present':
if user:
changed = False
if not check_user_password(module, client, user_name, user_password) and user_password is not None:
set_user_password(module, client, user_name, user_password)
changed = True
try:
if admin and not user['admin']:
client.grant_admin_privileges(user_name)
changed = True
elif not admin and user['admin']:
client.revoke_admin_privileges(user_name)
changed = True
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=changed)
else:
user_password = user_password or ''
create_user(module, client, user_name, user_password, admin)
if state == 'absent':
if user:
drop_user(module, client, user_name)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
googleinterns/learnbase | learnbase/src/main/webapp/WEB-INF/Lib/macurl2path.py | 332 | 3275 | """Macintosh-specific module for conversion between pathnames and URLs.
Do not import directly; use urllib instead."""
import urllib
import os
__all__ = ["url2pathname","pathname2url"]
def url2pathname(pathname):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
#
# XXXX The .. handling should be fixed...
#
tp = urllib.splittype(pathname)[0]
if tp and tp != 'file':
raise RuntimeError, 'Cannot convert non-local URL to pathname'
# Turn starting /// into /, an empty hostname means current host
if pathname[:3] == '///':
pathname = pathname[2:]
elif pathname[:2] == '//':
raise RuntimeError, 'Cannot convert non-local URL to pathname'
components = pathname.split('/')
# Remove . and embedded ..
i = 0
while i < len(components):
if components[i] == '.':
del components[i]
elif components[i] == '..' and i > 0 and \
components[i-1] not in ('', '..'):
del components[i-1:i+1]
i = i-1
elif components[i] == '' and i > 0 and components[i-1] != '':
del components[i]
else:
i = i+1
if not components[0]:
# Absolute unix path, don't start with colon
rv = ':'.join(components[1:])
else:
# relative unix path, start with colon. First replace
# leading .. by empty strings (giving ::file)
i = 0
while i < len(components) and components[i] == '..':
components[i] = ''
i = i + 1
rv = ':' + ':'.join(components)
# and finally unquote slashes and other funny characters
return urllib.unquote(rv)
def pathname2url(pathname):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
if '/' in pathname:
raise RuntimeError, "Cannot convert pathname containing slashes"
components = pathname.split(':')
# Remove empty first and/or last component
if components[0] == '':
del components[0]
if components[-1] == '':
del components[-1]
# Replace empty string ('::') by .. (will result in '/../' later)
for i in range(len(components)):
if components[i] == '':
components[i] = '..'
# Truncate names longer than 31 bytes
components = map(_pncomp2url, components)
if os.path.isabs(pathname):
return '/' + '/'.join(components)
else:
return '/'.join(components)
def _pncomp2url(component):
component = urllib.quote(component[:31], safe='') # We want to quote slashes
return component
def test():
for url in ["index.html",
"bar/index.html",
"/foo/bar/index.html",
"/foo/bar/",
"/"]:
print '%r -> %r' % (url, url2pathname(url))
for path in ["drive:",
"drive:dir:",
"drive:dir:file",
"drive:file",
"file",
":file",
":dir:",
":dir:file"]:
print '%r -> %r' % (path, pathname2url(path))
if __name__ == '__main__':
test()
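# Worked examples of the URL -> Mac path direction (following the rules above:
# an absolute URL maps to a path without a leading colon, a relative one to a
# path that starts with one):
#
#     url2pathname('/foo/bar/index.html')  -> 'foo:bar:index.html'
#     url2pathname('bar/index.html')       -> ':bar:index.html'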
| apache-2.0 |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/addons/netrender/slave.py | 2 | 17536 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import sys, os, platform, shutil
import http, http.client, http.server
import subprocess, time, threading
import json
import bpy
from netrender.utils import *
import netrender.model
import netrender.repath
import netrender.baking
import netrender.thumbnail as thumbnail
BLENDER_PATH = sys.argv[0]
CANCEL_POLL_SPEED = 2
MAX_TIMEOUT = 10
INCREMENT_TIMEOUT = 1
MAX_CONNECT_TRY = 10
def clearSlave(path):
shutil.rmtree(path)
def slave_Info(netsettings):
sysname, nodename, release, version, machine, processor = platform.uname()
slave = netrender.model.RenderSlave()
slave.name = nodename
slave.stats = sysname + " " + release + " " + machine + " " + processor
if netsettings.slave_tags:
slave.tags = set(netsettings.slave_tags.split(";"))
if netsettings.slave_bake:
slave.tags.add(netrender.model.TAG_BAKING)
if netsettings.slave_render:
slave.tags.add(netrender.model.TAG_RENDER)
return slave
def testCancel(conn, job_id, frame_number):
with ConnectionContext():
conn.request("HEAD", "/status", headers={"job-id":job_id, "job-frame": str(frame_number)})
# canceled if job isn't found anymore
if responseStatus(conn) == http.client.NO_CONTENT:
return True
else:
return False
def testFile(conn, job_id, slave_id, rfile, job_prefix, main_path=None):
job_full_path = createLocalPath(rfile, job_prefix, main_path, rfile.force)
found = os.path.exists(job_full_path)
if found and rfile.signature != None:
found_signature = hashFile(job_full_path)
found = found_signature == rfile.signature
if not found:
print("Found file %s at %s but signature mismatch!" % (rfile.filepath, job_full_path))
os.remove(job_full_path)
if not found:
# Force prefix path if not found
job_full_path = createLocalPath(rfile, job_prefix, main_path, True)
print("Downloading", job_full_path)
temp_path = os.path.join(job_prefix, "slave.temp")
with ConnectionContext():
conn.request("GET", fileURL(job_id, rfile.index), headers={"slave-id":slave_id})
response = conn.getresponse()
if response.status != http.client.OK:
return None # file for job not returned by server, need to return an error code to server
f = open(temp_path, "wb")
buf = response.read(1024)
while buf:
f.write(buf)
buf = response.read(1024)
f.close()
os.renames(temp_path, job_full_path)
rfile.filepath = job_full_path
return job_full_path
def breakable_timeout(timeout):
for i in range(timeout):
time.sleep(1)
if engine.test_break():
break
def render_slave(engine, netsettings, threads):
bisleep = BreakableIncrementedSleep(INCREMENT_TIMEOUT, 1, MAX_TIMEOUT, engine.test_break)
engine.update_stats("", "Network render node initiation")
slave_path = bpy.path.abspath(netsettings.path)
if not os.path.exists(slave_path):
print("Slave working path ( %s ) doesn't exist" % netsettings.path)
return
if not os.access(slave_path, os.W_OK):
print("Slave working path ( %s ) is not writable" % netsettings.path)
return
conn = clientConnection(netsettings)
if not conn:
print("Connection failed, will try connecting again at most %i times" % MAX_CONNECT_TRY)
bisleep.reset()
for i in range(MAX_CONNECT_TRY):
bisleep.sleep()
conn = clientConnection(netsettings)
if conn or engine.test_break():
break
print("Retry %i failed, waiting %is before retrying" % (i + 1, bisleep.current))
if conn:
with ConnectionContext():
conn.request("POST", "/slave", json.dumps(slave_Info(netsettings).serialize()))
response = conn.getresponse()
response.read()
slave_id = response.getheader("slave-id")
NODE_PREFIX = os.path.join(slave_path, "slave_" + slave_id)
verifyCreateDir(NODE_PREFIX)
engine.update_stats("", "Network render connected to master, waiting for jobs")
while not engine.test_break():
with ConnectionContext():
conn.request("GET", "/job", headers={"slave-id":slave_id})
response = conn.getresponse()
if response.status == http.client.OK:
bisleep.reset()
job = netrender.model.RenderJob.materialize(json.loads(str(response.read(), encoding='utf8')))
engine.update_stats("", "Network render processing job from master")
job_prefix = os.path.join(NODE_PREFIX, "job_" + job.id)
verifyCreateDir(job_prefix)
# set tempdir for fsaa temp files
# have to set environ var because render is done in a subprocess and that's the easiest way to propagate the setting
os.environ["TMP"] = job_prefix
if job.type == netrender.model.JOB_BLENDER:
job_path = job.files[0].original_path # original path of the first file
main_path, main_file = os.path.split(job_path)
job_full_path = testFile(conn, job.id, slave_id, job.files[0], job_prefix)
print("Fullpath", job_full_path)
print("File:", main_file, "and %i other files" % (len(job.files) - 1,))
for rfile in job.files[1:]:
testFile(conn, job.id, slave_id, rfile, job_prefix, main_path)
print("\t", rfile.filepath)
netrender.repath.update(job)
engine.update_stats("", "Render File " + main_file + " for job " + job.id)
elif job.type == netrender.model.JOB_VCS:
if not job.version_info:
# Need to return an error to server, incorrect job type
pass
job_path = job.files[0].filepath # path of main file
main_path, main_file = os.path.split(job_path)
job.version_info.update()
# For VCS jobs, file path is relative to the working copy path
job_full_path = os.path.join(job.version_info.wpath, job_path)
engine.update_stats("", "Render File " + main_file + " for job " + job.id)
# announce log to master
logfile = netrender.model.LogFile(job.id, slave_id, [frame.number for frame in job.frames])
with ConnectionContext():
conn.request("POST", "/log", bytes(json.dumps(logfile.serialize()), encoding='utf8'))
response = conn.getresponse()
response.read()
first_frame = job.frames[0].number
# start render
start_t = time.time()
if job.rendersWithBlender():
frame_args = []
for frame in job.frames:
print("frame", frame.number)
frame_args += ["-f", str(frame.number)]
with NoErrorDialogContext():
process = subprocess.Popen([BLENDER_PATH, "-b", "-noaudio", job_full_path, "-t", str(threads), "-o", os.path.join(job_prefix, "######"), "-E", job.render, "-F", "MULTILAYER"] + frame_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
elif job.subtype == netrender.model.JOB_SUB_BAKING:
tasks = []
for frame in job.frames:
tasks.append(netrender.baking.commandToTask(frame.command))
with NoErrorDialogContext():
process = netrender.baking.bake(job, tasks)
elif job.type == netrender.model.JOB_PROCESS:
command = job.frames[0].command
with NoErrorDialogContext():
process = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
headers = {"slave-id":slave_id}
results = []
line = ""
class ProcessData:
def __init__(self):
self.lock = threading.Lock()
self.stdout = bytes()
self.cancelled = False
self.start_time = time.time()
self.last_time = time.time()
data = ProcessData()
def run_process(process, data):
while not data.cancelled and process.poll() is None:
buf = process.stdout.read(1024)
data.lock.acquire()
data.stdout += buf
data.lock.release()
process_thread = threading.Thread(target=run_process, args=(process, data))
process_thread.start()
while not data.cancelled and process_thread.is_alive():
time.sleep(CANCEL_POLL_SPEED / 2)
current_time = time.time()
data.cancelled = engine.test_break()
if current_time - data.last_time > CANCEL_POLL_SPEED:
data.lock.acquire()
# update logs if needed
if data.stdout:
# (only need to update on one frame, they are linked
with ConnectionContext():
conn.request("PUT", logURL(job.id, first_frame), data.stdout, headers=headers)
responseStatus(conn)
stdout_text = str(data.stdout, encoding='utf8')
# Also output on console
if netsettings.use_slave_output_log:
print(stdout_text, end="")
lines = stdout_text.split("\n")
lines[0] = line + lines[0]
line = lines.pop()
if job.subtype == netrender.model.JOB_SUB_BAKING:
results.extend(netrender.baking.resultsFromOuput(lines))
data.stdout = bytes()
data.lock.release()
data.last_time = current_time
if testCancel(conn, job.id, first_frame):
engine.update_stats("", "Job canceled by Master")
data.cancelled = True
process_thread.join()
del process_thread
if job.type == netrender.model.JOB_BLENDER:
netrender.repath.reset(job)
# read leftovers if needed
data.stdout += process.stdout.read()
if data.cancelled:
# kill process if needed
if process.poll() is None:
try:
process.terminate()
except OSError:
pass
continue # to next frame
# flush the rest of the logs
if data.stdout:
stdout_text = str(data.stdout, encoding='utf8')
# Also output on console
if netsettings.use_slave_output_log:
print(stdout_text, end="")
lines = stdout_text.split("\n")
lines[0] = line + lines[0]
if job.subtype == netrender.model.JOB_SUB_BAKING:
results.extend(netrender.baking.resultsFromOuput(lines))
# (only need to update on one frame, they are linked
with ConnectionContext():
conn.request("PUT", logURL(job.id, first_frame), data.stdout, headers=headers)
if responseStatus(conn) == http.client.NO_CONTENT:
continue
total_t = time.time() - data.start_time
avg_t = total_t / len(job.frames)
status = process.returncode
print("status", status)
headers = {"job-id":job.id, "slave-id":slave_id, "job-time":str(avg_t)}
if status == 0: # non zero status is error
headers["job-result"] = str(netrender.model.FRAME_DONE)
for frame in job.frames:
headers["job-frame"] = str(frame.number)
if job.hasRenderResult():
# send image back to server
filename = os.path.join(job_prefix, "%06d.exr" % frame.number)
# thumbnail first
if netsettings.use_slave_thumb:
thumbname = thumbnail.generate(filename)
if thumbname:
f = open(thumbname, 'rb')
with ConnectionContext():
conn.request("PUT", "/thumb", f, headers=headers)
f.close()
responseStatus(conn)
f = open(filename, 'rb')
with ConnectionContext():
conn.request("PUT", "/render", f, headers=headers)
f.close()
if responseStatus(conn) == http.client.NO_CONTENT:
continue
elif job.subtype == netrender.model.JOB_SUB_BAKING:
index = job.frames.index(frame)
frame_results = [result_filepath for task_index, result_filepath in results if task_index == index]
for result_filepath in frame_results:
result_path, result_filename = os.path.split(result_filepath)
headers["result-filename"] = result_filename
headers["job-finished"] = str(result_filepath == frame_results[-1])
f = open(result_filepath, 'rb')
with ConnectionContext():
conn.request("PUT", "/result", f, headers=headers)
f.close()
if responseStatus(conn) == http.client.NO_CONTENT:
continue
elif job.type == netrender.model.JOB_PROCESS:
with ConnectionContext():
conn.request("PUT", "/render", headers=headers)
if responseStatus(conn) == http.client.NO_CONTENT:
continue
else:
headers["job-result"] = str(netrender.model.FRAME_ERROR)
for frame in job.frames:
headers["job-frame"] = str(frame.number)
# send error result back to server
with ConnectionContext():
conn.request("PUT", "/render", headers=headers)
if responseStatus(conn) == http.client.NO_CONTENT:
continue
engine.update_stats("", "Network render connected to master, waiting for jobs")
else:
bisleep.sleep()
conn.close()
if netsettings.use_slave_clear:
clearSlave(NODE_PREFIX)
if __name__ == "__main__":
pass
| gpl-3.0 |
diegoguimaraes/django | tests/validators/tests.py | 14 | 14725 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime, timedelta
import re
import types
from unittest import TestCase
from django.core.exceptions import ValidationError
from django.core.validators import (
BaseValidator, EmailValidator, MaxLengthValidator, MaxValueValidator,
MinLengthValidator, MinValueValidator, RegexValidator, URLValidator,
validate_comma_separated_integer_list, validate_email, validate_integer,
validate_ipv46_address, validate_ipv4_address, validate_ipv6_address,
validate_slug,
)
from django.test import SimpleTestCase
from django.test.utils import str_prefix
NOW = datetime.now()
EXTENDED_SCHEMES = ['http', 'https', 'ftp', 'ftps', 'git', 'file']
TEST_DATA = (
# (validator, value, expected),
(validate_integer, '42', None),
(validate_integer, '-42', None),
(validate_integer, -42, None),
(validate_integer, -42.5, None),
(validate_integer, None, ValidationError),
(validate_integer, 'a', ValidationError),
(validate_email, '[email protected]', None),
(validate_email, '[email protected]', None),
(validate_email, 'email@[127.0.0.1]', None),
(validate_email, 'email@[2001:dB8::1]', None),
(validate_email, 'email@[2001:dB8:0:0:0:0:0:1]', None),
(validate_email, 'email@[::fffF:127.0.0.1]', None),
(validate_email, '[email protected]', None),
(validate_email, '[email protected]', None),
(validate_email, '[email protected].उदाहरण.परीक्षा', None),
(validate_email, 'email@localhost', None),
(EmailValidator(whitelist=['localdomain']), 'email@localdomain', None),
(validate_email, '"test@test"@example.com', None),
(validate_email, None, ValidationError),
(validate_email, '', ValidationError),
(validate_email, 'abc', ValidationError),
(validate_email, 'abc@', ValidationError),
(validate_email, 'abc@bar', ValidationError),
(validate_email, 'a @x.cz', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, 'something@@somewhere.com', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, 'email@[127.0.0.256]', ValidationError),
(validate_email, 'email@[2001:db8::12345]', ValidationError),
(validate_email, 'email@[2001:db8:0:0:0:0:1]', ValidationError),
(validate_email, 'email@[::ffff:127.0.0.256]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]\n\n<script src="x.js">', ValidationError),
# Quoted-string format (CR not allowed)
(validate_email, '"\\\011"@here.com', None),
(validate_email, '"\\\012"@here.com', ValidationError),
(validate_email, '[email protected].', ValidationError),
# Max length of domain name in email is 249 (see validator for calculation)
(validate_email, 'a@%s.us' % ('a' * 249), None),
(validate_email, 'a@%s.us' % ('a' * 250), ValidationError),
(validate_slug, 'slug-ok', None),
(validate_slug, 'longer-slug-still-ok', None),
(validate_slug, '--------', None),
(validate_slug, 'nohyphensoranything', None),
(validate_slug, '', ValidationError),
(validate_slug, ' text ', ValidationError),
(validate_slug, ' ', ValidationError),
(validate_slug, '[email protected]', ValidationError),
(validate_slug, '你好', ValidationError),
(validate_slug, '\n', ValidationError),
(validate_ipv4_address, '1.1.1.1', None),
(validate_ipv4_address, '255.0.0.0', None),
(validate_ipv4_address, '0.0.0.0', None),
(validate_ipv4_address, '256.1.1.1', ValidationError),
(validate_ipv4_address, '25.1.1.', ValidationError),
(validate_ipv4_address, '25,1,1,1', ValidationError),
(validate_ipv4_address, '25.1 .1.1', ValidationError),
# validate_ipv6_address uses django.utils.ipv6, which
# is tested in much greater detail in its own testcase
(validate_ipv6_address, 'fe80::1', None),
(validate_ipv6_address, '::1', None),
(validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv6_address, '1:2', ValidationError),
(validate_ipv6_address, '::zzz', ValidationError),
(validate_ipv6_address, '12345::', ValidationError),
(validate_ipv46_address, '1.1.1.1', None),
(validate_ipv46_address, '255.0.0.0', None),
(validate_ipv46_address, '0.0.0.0', None),
(validate_ipv46_address, 'fe80::1', None),
(validate_ipv46_address, '::1', None),
(validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv46_address, '256.1.1.1', ValidationError),
(validate_ipv46_address, '25.1.1.', ValidationError),
(validate_ipv46_address, '25,1,1,1', ValidationError),
(validate_ipv46_address, '25.1 .1.1', ValidationError),
(validate_ipv46_address, '1:2', ValidationError),
(validate_ipv46_address, '::zzz', ValidationError),
(validate_ipv46_address, '12345::', ValidationError),
(validate_comma_separated_integer_list, '1', None),
(validate_comma_separated_integer_list, '1,2,3', None),
(validate_comma_separated_integer_list, '1,2,3,', None),
(validate_comma_separated_integer_list, '', ValidationError),
(validate_comma_separated_integer_list, 'a,b,c', ValidationError),
(validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
(MaxValueValidator(10), 10, None),
(MaxValueValidator(10), -10, None),
(MaxValueValidator(10), 0, None),
(MaxValueValidator(NOW), NOW, None),
(MaxValueValidator(NOW), NOW - timedelta(days=1), None),
(MaxValueValidator(0), 1, ValidationError),
(MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
(MinValueValidator(-10), -10, None),
(MinValueValidator(-10), 10, None),
(MinValueValidator(-10), 0, None),
(MinValueValidator(NOW), NOW, None),
(MinValueValidator(NOW), NOW + timedelta(days=1), None),
(MinValueValidator(0), -1, ValidationError),
(MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
(MaxLengthValidator(10), '', None),
(MaxLengthValidator(10), 10 * 'x', None),
(MaxLengthValidator(10), 15 * 'x', ValidationError),
(MinLengthValidator(10), 15 * 'x', None),
(MinLengthValidator(10), 10 * 'x', None),
(MinLengthValidator(10), '', ValidationError),
(URLValidator(), 'http://www.djangoproject.com/', None),
(URLValidator(), 'HTTP://WWW.DJANGOPROJECT.COM/', None),
(URLValidator(), 'http://localhost/', None),
(URLValidator(), 'http://example.com/', None),
(URLValidator(), 'http://www.example.com/', None),
(URLValidator(), 'http://www.example.com:8000/test', None),
(URLValidator(), 'http://valid-with-hyphens.com/', None),
(URLValidator(), 'http://subdomain.example.com/', None),
(URLValidator(), 'http://200.8.9.10/', None),
(URLValidator(), 'http://200.8.9.10:8000/test', None),
(URLValidator(), 'http://valid-----hyphens.com/', None),
(URLValidator(), 'http://example.com?something=value', None),
(URLValidator(), 'http://example.com/index.php?something=value&another=value2', None),
(URLValidator(), 'https://example.com/', None),
(URLValidator(), 'ftp://example.com/', None),
(URLValidator(), 'ftps://example.com/', None),
(URLValidator(EXTENDED_SCHEMES), 'file://localhost/path', None),
(URLValidator(EXTENDED_SCHEMES), 'git://example.com/', None),
(URLValidator(), 'foo', ValidationError),
(URLValidator(), 'http://', ValidationError),
(URLValidator(), 'http://example', ValidationError),
(URLValidator(), 'http://example.', ValidationError),
(URLValidator(), 'http://.com', ValidationError),
(URLValidator(), 'http://invalid-.com', ValidationError),
(URLValidator(), 'http://-invalid.com', ValidationError),
(URLValidator(), 'http://invalid.com-', ValidationError),
(URLValidator(), 'http://inv-.alid-.com', ValidationError),
(URLValidator(), 'http://inv-.-alid.com', ValidationError),
(URLValidator(), 'file://localhost/path', ValidationError),
(URLValidator(), 'git://example.com/', ValidationError),
(URLValidator(EXTENDED_SCHEMES), 'git://-invalid.com', ValidationError),
(BaseValidator(True), True, None),
(BaseValidator(True), False, ValidationError),
(RegexValidator(), '', None),
(RegexValidator(), 'x1x2', None),
(RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
(RegexValidator('[0-9]+'), '1234', None),
(RegexValidator(re.compile('[0-9]+')), '1234', None),
(RegexValidator('.*'), '', None),
(RegexValidator(re.compile('.*')), '', None),
(RegexValidator('.*'), 'xxxxx', None),
(RegexValidator('x'), 'y', ValidationError),
(RegexValidator(re.compile('x')), 'y', ValidationError),
(RegexValidator('x', inverse_match=True), 'y', None),
(RegexValidator(re.compile('x'), inverse_match=True), 'y', None),
(RegexValidator('x', inverse_match=True), 'x', ValidationError),
(RegexValidator(re.compile('x'), inverse_match=True), 'x', ValidationError),
(RegexValidator('x', flags=re.IGNORECASE), 'y', ValidationError),
(RegexValidator('a'), 'A', ValidationError),
(RegexValidator('a', flags=re.IGNORECASE), 'A', None),
)
def create_simple_test_method(validator, expected, value, num):
if expected is not None and issubclass(expected, Exception):
test_mask = 'test_%s_raises_error_%d'
def test_func(self):
# assertRaises not used, so as to be able to produce an error message
# containing the tested value
try:
validator(value)
except expected:
pass
else:
self.fail("%s not raised when validating '%s'" % (
expected.__name__, value))
else:
test_mask = 'test_%s_%d'
def test_func(self):
try:
self.assertEqual(expected, validator(value))
except ValidationError as e:
self.fail("Validation of '%s' failed. Error message was: %s" % (
value, str(e)))
if isinstance(validator, types.FunctionType):
val_name = validator.__name__
else:
val_name = validator.__class__.__name__
test_name = test_mask % (val_name, num)
return test_name, test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(SimpleTestCase):
def test_single_message(self):
v = ValidationError('Not Valid')
self.assertEqual(str(v), str_prefix("[%(_)s'Not Valid']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'Not Valid'])"))
def test_message_list(self):
v = ValidationError(['First Problem', 'Second Problem'])
self.assertEqual(str(v), str_prefix("[%(_)s'First Problem', %(_)s'Second Problem']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'First Problem', %(_)s'Second Problem'])"))
def test_message_dict(self):
v = ValidationError({'first': ['First Problem']})
self.assertEqual(str(v), str_prefix("{%(_)s'first': [%(_)s'First Problem']}"))
self.assertEqual(repr(v), str_prefix("ValidationError({%(_)s'first': [%(_)s'First Problem']})"))
def test_regex_validator_flags(self):
try:
RegexValidator(re.compile('a'), flags=re.IGNORECASE)
except TypeError:
pass
else:
self.fail("TypeError not raised when flags and pre-compiled regex in RegexValidator")
def test_max_length_validator_message(self):
v = MaxLengthValidator(16, message='"%(value)s" has more than %(limit_value)d characters.')
with self.assertRaisesMessage(ValidationError, '"djangoproject.com" has more than 16 characters.'):
v('djangoproject.com')
test_counter = 0
for validator, value, expected in TEST_DATA:
name, method = create_simple_test_method(validator, expected, value, test_counter)
setattr(TestSimpleValidators, name, method)
test_counter += 1
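# Each row of TEST_DATA above becomes its own method on TestSimpleValidators,
# named test_<validator name>_<n> (or test_<validator name>_raises_error_<n>
# when a ValidationError is expected), so a failure points at the exact
# validator/value pair involved.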
class TestValidatorEquality(TestCase):
"""
Tests that validators have valid equality operators (#21638)
"""
def test_regex_equality(self):
self.assertEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
RegexValidator(r'^(?:[0-9\.\-]*)://'),
)
self.assertEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
)
self.assertNotEqual(
RegexValidator('', flags=re.IGNORECASE),
RegexValidator(''),
)
self.assertNotEqual(
RegexValidator(''),
RegexValidator('', inverse_match=True),
)
def test_regex_equality_nocache(self):
pattern = r'^(?:[a-z0-9\.\-]*)://'
left = RegexValidator(pattern)
re.purge()
right = RegexValidator(pattern)
self.assertEqual(
left,
right,
)
def test_regex_equality_blank(self):
self.assertEqual(
RegexValidator(),
RegexValidator(),
)
def test_email_equality(self):
self.assertEqual(
EmailValidator(),
EmailValidator(),
)
self.assertNotEqual(
EmailValidator(message="BAD EMAIL"),
EmailValidator(),
)
self.assertEqual(
EmailValidator(message="BAD EMAIL", code="bad"),
EmailValidator(message="BAD EMAIL", code="bad"),
)
def test_basic_equality(self):
self.assertEqual(
MaxValueValidator(44),
MaxValueValidator(44),
)
self.assertNotEqual(
MaxValueValidator(44),
MinValueValidator(44),
)
self.assertNotEqual(
MinValueValidator(45),
MinValueValidator(11),
)
| bsd-3-clause |
heli522/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
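# A short, optional check (not part of the original example): the same
# micro/macro summary numbers can be computed directly with
# sklearn.metrics.roc_auc_score on the y_test / y_score arrays built above.
# Note that the macro value here averages per-class AUCs, which may differ
# slightly from the area of the averaged curve plotted above.
from sklearn.metrics import roc_auc_score
print("micro-average ROC AUC: {0:0.2f}".format(
    roc_auc_score(y_test, y_score, average="micro")))
print("macro-average ROC AUC: {0:0.2f}".format(
    roc_auc_score(y_test, y_score, average="macro")))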
| bsd-3-clause |
yinsu/grpc | src/python/grpcio_test/grpc_test/_junkdrawer/math_pb2.py | 47 | 8463 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(nathaniel): Remove this from source control after having made
# generation from the math.proto source part of GRPC's build-and-test
# process.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: math.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='math.proto',
package='math',
serialized_pb=_b('\n\nmath.proto\x12\x04math\",\n\x07\x44ivArgs\x12\x10\n\x08\x64ividend\x18\x01 \x02(\x03\x12\x0f\n\x07\x64ivisor\x18\x02 \x02(\x03\"/\n\x08\x44ivReply\x12\x10\n\x08quotient\x18\x01 \x02(\x03\x12\x11\n\tremainder\x18\x02 \x02(\x03\"\x18\n\x07\x46ibArgs\x12\r\n\x05limit\x18\x01 \x01(\x03\"\x12\n\x03Num\x12\x0b\n\x03num\x18\x01 \x02(\x03\"\x19\n\x08\x46ibReply\x12\r\n\x05\x63ount\x18\x01 \x02(\x03\x32\xa4\x01\n\x04Math\x12&\n\x03\x44iv\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00\x12.\n\x07\x44ivMany\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00(\x01\x30\x01\x12#\n\x03\x46ib\x12\r.math.FibArgs\x1a\t.math.Num\"\x00\x30\x01\x12\x1f\n\x03Sum\x12\t.math.Num\x1a\t.math.Num\"\x00(\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DIVARGS = _descriptor.Descriptor(
name='DivArgs',
full_name='math.DivArgs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dividend', full_name='math.DivArgs.dividend', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='divisor', full_name='math.DivArgs.divisor', index=1,
number=2, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=20,
serialized_end=64,
)
_DIVREPLY = _descriptor.Descriptor(
name='DivReply',
full_name='math.DivReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='quotient', full_name='math.DivReply.quotient', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='remainder', full_name='math.DivReply.remainder', index=1,
number=2, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=66,
serialized_end=113,
)
_FIBARGS = _descriptor.Descriptor(
name='FibArgs',
full_name='math.FibArgs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='limit', full_name='math.FibArgs.limit', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=115,
serialized_end=139,
)
_NUM = _descriptor.Descriptor(
name='Num',
full_name='math.Num',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num', full_name='math.Num.num', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=141,
serialized_end=159,
)
_FIBREPLY = _descriptor.Descriptor(
name='FibReply',
full_name='math.FibReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='count', full_name='math.FibReply.count', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=161,
serialized_end=186,
)
DESCRIPTOR.message_types_by_name['DivArgs'] = _DIVARGS
DESCRIPTOR.message_types_by_name['DivReply'] = _DIVREPLY
DESCRIPTOR.message_types_by_name['FibArgs'] = _FIBARGS
DESCRIPTOR.message_types_by_name['Num'] = _NUM
DESCRIPTOR.message_types_by_name['FibReply'] = _FIBREPLY
DivArgs = _reflection.GeneratedProtocolMessageType('DivArgs', (_message.Message,), dict(
DESCRIPTOR = _DIVARGS,
__module__ = 'math_pb2'
# @@protoc_insertion_point(class_scope:math.DivArgs)
))
_sym_db.RegisterMessage(DivArgs)
DivReply = _reflection.GeneratedProtocolMessageType('DivReply', (_message.Message,), dict(
DESCRIPTOR = _DIVREPLY,
__module__ = 'math_pb2'
# @@protoc_insertion_point(class_scope:math.DivReply)
))
_sym_db.RegisterMessage(DivReply)
FibArgs = _reflection.GeneratedProtocolMessageType('FibArgs', (_message.Message,), dict(
DESCRIPTOR = _FIBARGS,
__module__ = 'math_pb2'
# @@protoc_insertion_point(class_scope:math.FibArgs)
))
_sym_db.RegisterMessage(FibArgs)
Num = _reflection.GeneratedProtocolMessageType('Num', (_message.Message,), dict(
DESCRIPTOR = _NUM,
__module__ = 'math_pb2'
# @@protoc_insertion_point(class_scope:math.Num)
))
_sym_db.RegisterMessage(Num)
FibReply = _reflection.GeneratedProtocolMessageType('FibReply', (_message.Message,), dict(
DESCRIPTOR = _FIBREPLY,
__module__ = 'math_pb2'
# @@protoc_insertion_point(class_scope:math.FibReply)
))
_sym_db.RegisterMessage(FibReply)
# @@protoc_insertion_point(module_scope)
| bsd-3-clause |
Nowheresly/odoo | addons/l10n_at/account_wizard.py | 379 | 1234 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import osv
from openerp import addons
class AccountWizard_cd(osv.osv_memory):
_inherit='wizard.multi.charts.accounts'
_defaults = {
'code_digits' : 0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
blitzagency/flowbee | flowbee/deciders/events.py | 1 | 13786 | """SWF Event Types
Possible Decider Events:
http://boto3.readthedocs.org/en/latest/reference/services/swf.html#SWF.Client.poll_for_decision_task
WorkflowExecutionStarted
WorkflowExecutionCancelRequested
WorkflowExecutionCompleted
CompleteWorkflowExecutionFailed
WorkflowExecutionFailed
FailWorkflowExecutionFailed
WorkflowExecutionTimedOut
WorkflowExecutionCanceled
CancelWorkflowExecutionFailed
WorkflowExecutionContinuedAsNew
ContinueAsNewWorkflowExecutionFailed
WorkflowExecutionTerminated
DecisionTaskScheduled
DecisionTaskStarted
DecisionTaskCompleted
DecisionTaskTimedOut
ActivityTaskScheduled
ScheduleActivityTaskFailed
ActivityTaskStarted
ActivityTaskCompleted
ActivityTaskFailed
ActivityTaskTimedOut
ActivityTaskCanceled
ActivityTaskCancelRequested
RequestCancelActivityTaskFailed
WorkflowExecutionSignaled
MarkerRecorded
RecordMarkerFailed
TimerStarted
StartTimerFailed
TimerFired
TimerCanceled
CancelTimerFailed
StartChildWorkflowExecutionInitiated
StartChildWorkflowExecutionFailed
ChildWorkflowExecutionStarted
ChildWorkflowExecutionCompleted
ChildWorkflowExecutionFailed
ChildWorkflowExecutionTimedOut
ChildWorkflowExecutionCanceled
ChildWorkflowExecutionTerminated
SignalExternalWorkflowExecutionInitiated
SignalExternalWorkflowExecutionFailed
ExternalWorkflowExecutionSignaled
RequestCancelExternalWorkflowExecutionInitiated
RequestCancelExternalWorkflowExecutionFailed
ExternalWorkflowExecutionCancelRequested
LambdaFunctionScheduled
LambdaFunctionStarted
LambdaFunctionCompleted
LambdaFunctionFailed
LambdaFunctionTimedOut
ScheduleLambdaFunctionFailed
StartLambdaFunctionFailed
"""
import logging
from .. import exceptions
from .. import compression
from .. import utils
log = logging.getLogger(__name__)
class DeciderEvent(object):
def __init__(
self, meta, event, event_history):
self.client = utils.get_client()
self.meta = meta
self.type = event["eventType"]
self.event = event
self.event_history = event_history
self.payload = None
self.prepare_event()
    def prepare_event(self):
raise NotImplementedError()
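    # Payload helpers: judging by the flowbee.compression helper names, event
    # payloads are JSON documents that are compressed and base64-encoded before
    # being stored in SWF input/result/control fields.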
def deserialize(self, data):
return compression.decompress_b64_json(data)
def serialize(self, data):
return compression.compress_b64_json(data)
class WorkflowExecutionStarted(DeciderEvent):
"""WorkflowExecutionStarted Event
{
u'eventId': 1,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 20, 7, 17000, tzinfo=tzlocal()),
u'eventType': u'WorkflowExecutionStarted',
u'workflowExecutionStartedEventAttributes': {
u'childPolicy': u'TERMINATE',
u'executionStartToCloseTimeout': u'60',
u'input': u'H4sIADdu91YC/6tWyk0tSVSyUqiu1VFQSkmEsJVApFKSkg6IsFIwrK0FALiLFCcoAAAA',
u'parentInitiatedEventId': 0,
u'taskList': {u'name': u'flowbee-test-tasks'},
u'taskPriority': u'0',
u'taskStartToCloseTimeout': u'10',
u'workflowType': {
u'name': u'MyWorkflow.MyActivities',
u'version': u'0.0.1'}
}
}
"""
def prepare_event(self):
try:
attributes = self.event["workflowExecutionStartedEventAttributes"]
except KeyError as e:
message = "Unable to lookup '{0}' in {1}".format(e.message, self.event)
log.error(message)
raise exceptions.EventException(message=message)
data = attributes.get("input", None)
try:
self.workflow_name = attributes["workflowType"]["name"]
self.workflow_version = attributes["workflowType"]["version"]
except KeyError as e:
message = "Unable to lookup '{0}' in {1}".format(e.message, attributes)
log.error(message)
raise exceptions.EventException(message=message)
if data is not None:
self.payload = self.deserialize(data)
else:
self.payload = None
class ActivityAbstractFailure(DeciderEvent):
def retry(self):
log.info(
"Retrying task '%s@%s'. Retry attempt: %s",
self.task_name, self.task_version, self.num_retries
)
utils.schedule_activity(
client=self.client,
tasklist=self.tasklist,
activity_id=self.activity_id,
task_token=self.meta.task_token,
name=self.task_name,
version=self.task_version,
payload=self.payload,
attempt=self.num_retries
)
def process_history(self, attributes):
try:
scheduled_event_id = attributes["scheduledEventId"]
except KeyError as e:
message = "Unable to lookup '{0}' in {1}".format(e.message, self.event)
log.error(message)
raise exceptions.EventException(message=message)
try:
scheduled_activity_event = [evt for evt in self.event_history if evt["eventId"] == scheduled_event_id][0]
except IndexError:
message = "Unable to find event id '{0}' in event_history".format(scheduled_event_id)
log.error(message)
raise exceptions.EventException(message=message)
try:
activity = scheduled_activity_event["activityTaskScheduledEventAttributes"]
except KeyError as e:
message = "Unable to lookup '{0}' in {1}".format(e.message, scheduled_activity_event)
log.error(message)
raise exceptions.EventException(message=message)
try:
self.activity_id = activity["activityId"].rsplit("-", 1)[0]
self.tasklist = activity["taskList"]["name"]
self.task_name = activity["activityType"]["name"]
self.task_version = activity["activityType"]["version"]
self.payload = activity["input"]
except KeyError as e:
message = "Unable to find key '{0}' in 'activityTaskScheduledEventAttributes'".format(e.message)
log.error(message)
raise exceptions.EventException(message=message)
self.num_retries = sum([
1 for evt in self.event_history
if evt["eventType"] == "ActivityTaskScheduled" and
evt["activityTaskScheduledEventAttributes"]["activityId"].startswith(self.activity_id)
])
class ActivityTaskScheduled(DeciderEvent):
"""ActivityTaskScheduled Event
{
u'activityTaskScheduledEventAttributes': {
u'activityId': u'com.flowbee-test.MyWorkflow.MyWorkflow.MyActivities-eb4d44a2c088452a8de053caf50209f7.23gHXuoeTXnzl8Xts+14bNNscjpxZaCJmit8tr2y2Ofzs=.stage1@0.0.1-0',
u'activityType': {
u'name': u'stage1',
u'version': u'0.0.1'},
u'decisionTaskCompletedEventId': 9,
u'heartbeatTimeout': u'NONE',
u'input': u'H4sIADxu91YC/6tWSixKL1ayUohWyilNrlSK1VFQyi6HilUrpeXng+lEIKmUpKQDIqwUDGtrawHg8m1aOQAAAA==',
u'scheduleToCloseTimeout': u'10',
u'scheduleToStartTimeout': u'10',
u'startToCloseTimeout': u'NONE',
u'taskList': {u'name': u'flowbee-test-tasks'},
u'taskPriority': u'0'},
u'eventId': 10,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 20, 12, 560000, tzinfo=tzlocal()),
u'eventType': u'ActivityTaskScheduled'
}
"""
def prepare_event(self):
try:
attributes = self.event["activityTaskScheduledEventAttributes"]
        except KeyError as e:
message = "Unable to lookup '{0}' in {1}".format(e.message, self.event)
log.error(message)
raise exceptions.EventException(message=message)
self.tasklist = attributes["taskList"]["name"]
self.priority = attributes["taskPriority"]
self.name = attributes["activityType"]["name"]
self.version = attributes["activityType"]["version"]
self.activity_id = attributes["activityId"]
data = attributes.get("input", None)
if data is not None:
self.payload = self.deserialize(data)
else:
self.payload = None
class ActivityTaskStarted(DeciderEvent):
"""ActivityTaskStarted
{
u'activityTaskStartedEventAttributes': {
u'identity': u'MyWorkflow',
u'scheduledEventId': 10},
u'eventId': 11,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 20, 12, 599000, tzinfo=tzlocal()),
u'eventType': u'ActivityTaskStarted'
}
"""
def prepare_event(self):
return
class ActivityTaskCompleted(DeciderEvent):
"""ActivityTaskCompleted Event
{
u'eventId': 15,
u'eventType': u'ActivityTaskCompleted',
u'activityTaskCompletedEventAttributes': {
u'startedEventId': 14,
u'scheduledEventId': 13,
u'result': u'H4sIABZt91YC/1MqLilKLE9KLSrKTC1WAgBhRJKGDgAAAA=='},
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 15, 17, 771000, tzinfo=tzlocal())
}
"""
def prepare_event(self):
data = self.event \
.get("activityTaskCompletedEventAttributes", {}) \
.get("result", None)
if data is not None:
self.payload = self.deserialize(data)
else:
self.payload = None
class ActivityTaskTimedOut(ActivityAbstractFailure):
"""ActivityTaskTimedOut Event
{
u'activityTaskTimedOutEventAttributes': {
u'scheduledEventId': 16,
u'startedEventId': 17,
u'timeoutType': u'SCHEDULE_TO_CLOSE'},
u'eventId': 18,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 29, 57, 609000, tzinfo=tzlocal()),
u'eventType': u'ActivityTaskTimedOut'
}
"""
def prepare_event(self):
self.num_retries = 0
attributes = self.event.get("activityTaskTimedOutEventAttributes")
self.process_history(attributes)
class ActivityTaskFailed(ActivityAbstractFailure):
def prepare_event(self):
self.num_retries = 0
attributes = self.event.get("activityTaskFailedEventAttributes")
self.process_history(attributes)
class ScheduleActivityTaskFailed(DeciderEvent):
def prepare_event(self):
        attributes = self.event["scheduleActivityTaskFailedEventAttributes"]
activity_id = attributes.get("activityId", "unknown activity id")
activity_name = attributes.get("activityType", {}).get("name", "unknown name")
activity_version = attributes.get("activityType", {}).get("version", "unknown version")
cause = attributes.get("cause", "unknown")
        message = "Failed to schedule activity[{1}@{2}]: {0} - {3}" \
            .format(cause, activity_name, activity_version, activity_id)
log.error(message)
raise exceptions.EventException(message=message)
class TimerStarted(DeciderEvent):
"""TimerStarted Event
{
u'eventId': 5,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 20, 7, 363000, tzinfo=tzlocal()),
u'eventType': u'TimerStarted',
u'timerStartedEventAttributes': {
u'decisionTaskCompletedEventId': 4,
u'startToFireTimeout': u'5',
u'timerId': u'com.flowbee-test.MyWorkflow.MyWorkflow.MyActivities-eb4d44a2c088452a8de053caf50209f7.23gHXuoeTXnzl8Xts+14bNNscjpxZaCJmit8tr2y2Ofzs='}
}
"""
def prepare_event(self):
try:
attributes = self.event["timerStartedEventAttributes"]
except KeyError:
message = "Unable to locate 'timerStartedEventAttributes' on {0}".format(self.event)
log.error(message)
raise exceptions.EventException(message=message)
self.timer_id = attributes["timerId"]
self.seconds = int(attributes["startToFireTimeout"])
try:
data = attributes["control"]
except KeyError:
data = None
if data is None:
self.payload = None
else:
self.payload = self.deserialize(data)
class TimerFired(DeciderEvent):
"""TimerFired Event
{
u'eventId': 6,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 20, 12, 367000, tzinfo=tzlocal()),
u'eventType': u'TimerFired',
u'timerFiredEventAttributes': {
u'startedEventId': 5,
u'timerId': u'com.flowbee-test.MyWorkflow.MyWorkflow.MyActivities-eb4d44a2c088452a8de053caf50209f7.23gHXuoeTXnzl8Xts+14bNNscjpxZaCJmit8tr2y2Ofzs='}
}
"""
def prepare_event(self):
timer_id = self.event.get("timerFiredEventAttributes", {}).get("timerId")
self.timer_id = timer_id
if timer_id is None:
message = "Unable to locate 'timerId' on 'timerFiredEventAttributes'"
log.error(message)
raise exceptions.EventException(message=message)
try:
timer_started_event = [
x for x in self.event_history
if x["eventType"] == "TimerStarted" and
x["timerStartedEventAttributes"]["timerId"] == timer_id][0]
except KeyError as e:
message = "Failed to find key in event_history '{0}'".format(e.message)
log.error(message)
raise exceptions.EventException(message=message)
except IndexError as e:
message = "Failed to locate corresponding 'TimerStarted' event with id '{0}'".format(timer_id)
log.error(message)
raise exceptions.EventException(message=message)
data = timer_started_event \
.get("timerStartedEventAttributes", {}) \
.get("control", None)
if data is not None:
self.payload = self.deserialize(data)
else:
self.payload = None
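# Rough usage sketch (not part of this module). A decider loop would typically
# map each history event returned by poll_for_decision_task onto one of the
# classes above; `meta`, `event` and `history` below are assumed to come from
# that response.
#
#   EVENT_CLASSES = {
#       "WorkflowExecutionStarted": WorkflowExecutionStarted,
#       "ActivityTaskScheduled": ActivityTaskScheduled,
#       "ActivityTaskCompleted": ActivityTaskCompleted,
#       "ActivityTaskTimedOut": ActivityTaskTimedOut,
#       "ActivityTaskFailed": ActivityTaskFailed,
#       "TimerStarted": TimerStarted,
#       "TimerFired": TimerFired,
#   }
#
#   def dispatch(meta, event, history):
#       cls = EVENT_CLASSES.get(event["eventType"])
#       return cls(meta, event, history) if cls else None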
| mit |
afaheem88/tempest_neutron | tempest/api/compute/admin/test_networks.py | 8 | 2055 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
CONF = config.CONF
class NetworksTest(base.BaseComputeAdminTest):
_api_version = 2
"""
Tests Nova Networks API that usually requires admin privileges.
API docs:
http://developer.openstack.org/api-ref-compute-v2-ext.html#ext-os-networks
"""
@classmethod
def resource_setup(cls):
super(NetworksTest, cls).resource_setup()
cls.client = cls.os_adm.networks_client
def test_get_network(self):
resp, networks = self.client.list_networks()
configured_network = [x for x in networks if x['label'] ==
CONF.compute.fixed_network_name]
self.assertEqual(1, len(configured_network),
"{0} networks with label {1}".format(
len(configured_network),
CONF.compute.fixed_network_name))
configured_network = configured_network[0]
_, network = self.client.get_network(configured_network['id'])
self.assertEqual(configured_network['label'], network['label'])
def test_list_all_networks(self):
_, networks = self.client.list_networks()
# Check the configured network is in the list
configured_network = CONF.compute.fixed_network_name
self.assertIn(configured_network, [x['label'] for x in networks])
| apache-2.0 |
Davidjohnwilson/sympy | sympy/functions/special/tests/test_delta_functions.py | 32 | 2856 | from sympy import (
adjoint, conjugate, DiracDelta, Heaviside, nan, pi, sign, sqrt,
symbols, transpose, Symbol, Piecewise, I, S, Eq
)
from sympy.utilities.pytest import raises
from sympy.core.function import ArgumentIndexError
x, y = symbols('x y')
def test_DiracDelta():
assert DiracDelta(1) == 0
assert DiracDelta(5.1) == 0
assert DiracDelta(-pi) == 0
assert DiracDelta(5, 7) == 0
assert DiracDelta(nan) == nan
assert DiracDelta(0).func is DiracDelta
assert DiracDelta(x).func is DiracDelta
assert adjoint(DiracDelta(x)) == DiracDelta(x)
assert adjoint(DiracDelta(x - y)) == DiracDelta(x - y)
assert conjugate(DiracDelta(x)) == DiracDelta(x)
assert conjugate(DiracDelta(x - y)) == DiracDelta(x - y)
assert transpose(DiracDelta(x)) == DiracDelta(x)
assert transpose(DiracDelta(x - y)) == DiracDelta(x - y)
assert DiracDelta(x).diff(x) == DiracDelta(x, 1)
assert DiracDelta(x, 1).diff(x) == DiracDelta(x, 2)
assert DiracDelta(x).is_simple(x) is True
assert DiracDelta(3*x).is_simple(x) is True
assert DiracDelta(x**2).is_simple(x) is False
assert DiracDelta(sqrt(x)).is_simple(x) is False
assert DiracDelta(x).is_simple(y) is False
assert DiracDelta(x*y).simplify(x) == DiracDelta(x)/abs(y)
assert DiracDelta(x*y).simplify(y) == DiracDelta(y)/abs(x)
assert DiracDelta(x**2*y).simplify(x) == DiracDelta(x**2*y)
assert DiracDelta(y).simplify(x) == DiracDelta(y)
assert DiracDelta((x - 1)*(x - 2)*(x - 3)).simplify(x) == \
DiracDelta(x - 3)/2 + DiracDelta(x - 2) + DiracDelta(x - 1)/2
raises(ArgumentIndexError, lambda: DiracDelta(x).fdiff(2))
raises(ValueError, lambda: DiracDelta(x, -1))
def test_heaviside():
assert Heaviside(0).func == Heaviside
assert Heaviside(-5) == 0
assert Heaviside(1) == 1
assert Heaviside(nan) == nan
assert adjoint(Heaviside(x)) == Heaviside(x)
assert adjoint(Heaviside(x - y)) == Heaviside(x - y)
assert conjugate(Heaviside(x)) == Heaviside(x)
assert conjugate(Heaviside(x - y)) == Heaviside(x - y)
assert transpose(Heaviside(x)) == Heaviside(x)
assert transpose(Heaviside(x - y)) == Heaviside(x - y)
assert Heaviside(x).diff(x) == DiracDelta(x)
assert Heaviside(x + I).is_Function is True
assert Heaviside(I*x).is_Function is True
raises(ArgumentIndexError, lambda: Heaviside(x).fdiff(2))
raises(ValueError, lambda: Heaviside(I))
raises(ValueError, lambda: Heaviside(2 + 3*I))
def test_rewrite():
x, y = Symbol('x', real=True), Symbol('y')
assert Heaviside(x).rewrite(Piecewise) == \
Piecewise((1, x > 0), (S(1)/2, Eq(x, 0)), (0, True))
assert Heaviside(y).rewrite(Piecewise) == Heaviside(y)
assert Heaviside(x).rewrite(sign) == (sign(x)+1)/2
assert Heaviside(y).rewrite(sign) == Heaviside(y)
| bsd-3-clause |
zubron/servo | tests/wpt/web-platform-tests/cors/resources/status.py | 220 | 1223 | def main(request, response):
response.headers.set("Access-Control-Allow-Origin", request.headers.get("origin") )
response.headers.set("Access-Control-Expose-Headers", "X-Request-Method")
if request.method == 'OPTIONS':
response.headers.set("Access-Control-Allow-Methods", "GET, CHICKEN, HEAD, POST, PUT")
if 'headers' in request.GET:
response.headers.set("Access-Control-Allow-Headers", request.GET.first('headers'))
response.headers.set("X-Request-Method", request.method)
response.headers.set("X-A-C-Request-Method", request.headers.get("Access-Control-Request-Method", ""));
    # This should reasonably work for most response codes.
try:
code = int(request.GET.first("code", 200))
except ValueError:
code = 200
text = request.GET.first("text", "OMG")
if request.method == "OPTIONS" and "preflight" in request.GET:
try:
code = int(request.GET.first('preflight'))
        except (KeyError, ValueError):
pass
status = code, text
if "type" in request.GET:
response.headers.set("Content-Type", request.GET.first('type'))
body = request.GET.first('content', "")
return status, [], body
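# Illustrative request (assumed wptserve usage, not part of the handler above):
#   GET /cors/resources/status.py?code=204&text=No%20Content&type=text/plain
#   returns a "204 No Content" response whose Access-Control-Allow-Origin
#   header mirrors the Origin header of the request.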
| mpl-2.0 |
xodus7/tensorflow | tensorflow/contrib/bigtable/python/ops/bigtable_api.py | 4 | 28480 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Python API for TensorFlow's Cloud Bigtable integration.
TensorFlow has support for reading from and writing to Cloud Bigtable. To use
TensorFlow + Cloud Bigtable integration, first create a BigtableClient to
configure your connection to Cloud Bigtable, and then create a BigtableTable
object to allow you to create numerous `tf.data.Dataset`s to read data, or
write a `tf.data.Dataset` object to the underlying Cloud Bigtable table.
For background on Cloud Bigtable, see: https://cloud.google.com/bigtable .
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import iteritems
from six import string_types
from tensorflow.contrib.bigtable.ops import gen_bigtable_ops
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.contrib.util import loader
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import resource_loader
_bigtable_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_bigtable.so"))
class BigtableClient(object):
"""BigtableClient is the entrypoint for interacting with Cloud Bigtable in TF.
BigtableClient encapsulates a connection to Cloud Bigtable, and exposes the
`table` method to open a Bigtable table.
"""
def __init__(self,
project_id,
instance_id,
connection_pool_size=None,
max_receive_message_size=None):
"""Creates a BigtableClient that can be used to open connections to tables.
Args:
project_id: A string representing the GCP project id to connect to.
instance_id: A string representing the Bigtable instance to connect to.
connection_pool_size: (Optional.) A number representing the number of
concurrent connections to the Cloud Bigtable service to make.
max_receive_message_size: (Optional.) The maximum bytes received in a
single gRPC response.
Raises:
ValueError: if the arguments are invalid (e.g. wrong type, or out of
expected ranges (e.g. negative).)
"""
if not isinstance(project_id, str):
raise ValueError("`project_id` must be a string")
self._project_id = project_id
if not isinstance(instance_id, str):
raise ValueError("`instance_id` must be a string")
self._instance_id = instance_id
if connection_pool_size is None:
connection_pool_size = -1
elif connection_pool_size < 1:
raise ValueError("`connection_pool_size` must be positive")
if max_receive_message_size is None:
max_receive_message_size = -1
elif max_receive_message_size < 1:
raise ValueError("`max_receive_message_size` must be positive")
self._connection_pool_size = connection_pool_size
self._resource = gen_bigtable_ops.bigtable_client(
project_id, instance_id, connection_pool_size, max_receive_message_size)
def table(self, name, snapshot=None):
"""Opens a table and returns a `tf.contrib.bigtable.BigtableTable` object.
Args:
name: A `tf.string` `tf.Tensor` name of the table to open.
snapshot: Either a `tf.string` `tf.Tensor` snapshot id, or `True` to
request the creation of a snapshot. (Note: currently unimplemented.)
Returns:
A `tf.contrib.bigtable.BigtableTable` Python object representing the
operations available on the table.
"""
# TODO(saeta): Implement snapshot functionality.
table = gen_bigtable_ops.bigtable_table(self._resource, name)
return BigtableTable(name, snapshot, table)
class BigtableTable(object):
"""BigtableTable is the entrypoint for reading and writing data in Cloud
Bigtable.
This BigtableTable class is the Python representation of the Cloud Bigtable
table within TensorFlow. Methods on this class allow data to be read from and
written to the Cloud Bigtable service in flexible and high performance
manners.
"""
# TODO(saeta): Investigate implementing tf.contrib.lookup.LookupInterface.
# TODO(saeta): Consider variant tensors instead of resources (while supporting
# connection pooling).
def __init__(self, name, snapshot, resource):
self._name = name
self._snapshot = snapshot
self._resource = resource
def lookup_columns(self, *args, **kwargs):
"""Retrieves the values of columns for a dataset of keys.
Example usage:
```python
table = bigtable_client.table("my_table")
key_dataset = table.get_keys_prefix("imagenet")
images = key_dataset.apply(table.lookup_columns(("cf1", "image"),
("cf2", "label"),
("cf2", "boundingbox")))
training_data = images.map(parse_and_crop, num_parallel_calls=64).batch(128)
```
Alternatively, you can use keyword arguments to specify the columns to
capture. Example (same as above, rewritten):
```python
table = bigtable_client.table("my_table")
key_dataset = table.get_keys_prefix("imagenet")
images = key_dataset.apply(table.lookup_columns(
cf1="image", cf2=("label", "boundingbox")))
training_data = images.map(parse_and_crop, num_parallel_calls=64).batch(128)
```
Note: certain `kwargs` keys are reserved, and thus, some column families
cannot be identified using the `kwargs` syntax. Instead, please use the
`args` syntax. This list includes:
- 'name'
Note: this list can change at any time.
Args:
*args: A list of tuples containing (column family, column name) pairs.
**kwargs: Column families (keys) and column qualifiers (values).
Returns:
A function that can be passed to `tf.data.Dataset.apply` to retrieve the
values of columns for the rows.
"""
table = self # Capture self
normalized = args
if normalized is None:
normalized = []
if isinstance(normalized, tuple):
normalized = list(normalized)
for key, value in iteritems(kwargs):
if key == "name":
continue
if isinstance(value, str):
normalized.append((key, value))
continue
for col in value:
normalized.append((key, col))
def _apply_fn(dataset):
# TODO(saeta): Verify dataset's types are correct!
return _BigtableLookupDataset(dataset, table, normalized)
return _apply_fn
def keys_by_range_dataset(self, start, end):
"""Retrieves all row keys between start and end.
Note: it does NOT retrieve the values of columns.
Args:
start: The start row key. The row keys for rows after start (inclusive)
will be retrieved.
end: (Optional.) The end row key. Rows up to (but not including) end will
be retrieved. If end is None, all subsequent row keys will be retrieved.
Returns:
A `tf.data.Dataset` containing `tf.string` Tensors corresponding to all
of the row keys between `start` and `end`.
"""
# TODO(saeta): Make inclusive / exclusive configurable?
if end is None:
end = ""
return _BigtableRangeKeyDataset(self, start, end)
def keys_by_prefix_dataset(self, prefix):
"""Retrieves the row keys matching a given prefix.
Args:
prefix: All row keys that begin with `prefix` in the table will be
retrieved.
Returns:
A `tf.data.Dataset`. containing `tf.string` Tensors corresponding to all
of the row keys matching that prefix.
"""
return _BigtablePrefixKeyDataset(self, prefix)
def sample_keys(self):
"""Retrieves a sampling of row keys from the Bigtable table.
This dataset is most often used in conjunction with
`tf.contrib.data.parallel_interleave` to construct a set of ranges for
scanning in parallel.
Returns:
A `tf.data.Dataset` returning string row keys.
"""
return _BigtableSampleKeysDataset(self)
def scan_prefix(self, prefix, probability=None, columns=None, **kwargs):
"""Retrieves row (including values) from the Bigtable service.
Rows with row-key prefixed by `prefix` will be retrieved.
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
table = # ...
ds1 = table.scan_prefix("row_prefix", columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.scan_prefix("row_prefix", cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
prefix: The prefix all row keys must match to be retrieved for prefix-
based scans.
probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
A non-1 value indicates to probabilistically sample rows with the
provided probability.
columns: The columns to read. Note: most commonly, they are expressed as
kwargs. Use the columns value if you are using column families that are
reserved. The value of columns and kwargs are merged. Columns is a list
of tuples of strings ("column_family", "column_qualifier").
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
return _BigtableScanDataset(self, prefix, "", "", normalized, probability)
def scan_range(self, start, end, probability=None, columns=None, **kwargs):
"""Retrieves rows (including values) from the Bigtable service.
Rows with row-keys between `start` and `end` will be retrieved.
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
table = # ...
ds1 = table.scan_range("row_start", "row_end", columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.scan_range("row_start", "row_end", cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
start: The start of the range when scanning by range.
end: (Optional.) The end of the range when scanning by range.
probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
A non-1 value indicates to probabilistically sample rows with the
provided probability.
columns: The columns to read. Note: most commonly, they are expressed as
kwargs. Use the columns value if you are using column families that are
reserved. The value of columns and kwargs are merged. Columns is a list
of tuples of strings ("column_family", "column_qualifier").
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
return _BigtableScanDataset(self, "", start, end, normalized, probability)
def parallel_scan_prefix(self,
prefix,
num_parallel_scans=None,
probability=None,
columns=None,
**kwargs):
"""Retrieves row (including values) from the Bigtable service at high speed.
Rows with row-key prefixed by `prefix` will be retrieved. This method is
similar to `scan_prefix`, but by contrast performs multiple sub-scans in
parallel in order to achieve higher performance.
Note: The dataset produced by this method is not deterministic!
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
table = # ...
ds1 = table.parallel_scan_prefix("row_prefix", columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.parallel_scan_prefix("row_prefix", cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
prefix: The prefix all row keys must match to be retrieved for prefix-
based scans.
num_parallel_scans: (Optional.) The number of concurrent scans against the
Cloud Bigtable instance.
probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
A non-1 value indicates to probabilistically sample rows with the
provided probability.
columns: The columns to read. Note: most commonly, they are expressed as
kwargs. Use the columns value if you are using column families that are
reserved. The value of columns and kwargs are merged. Columns is a list
of tuples of strings ("column_family", "column_qualifier").
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
ds = _BigtableSampleKeyPairsDataset(self, prefix, "", "")
return self._make_parallel_scan_dataset(ds, num_parallel_scans, probability,
normalized)
def parallel_scan_range(self,
start,
end,
num_parallel_scans=None,
probability=None,
columns=None,
**kwargs):
"""Retrieves rows (including values) from the Bigtable service.
Rows with row-keys between `start` and `end` will be retrieved. This method
is similar to `scan_range`, but by contrast performs multiple sub-scans in
parallel in order to achieve higher performance.
Note: The dataset produced by this method is not deterministic!
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
table = # ...
ds1 = table.parallel_scan_range("row_start",
"row_end",
columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.parallel_scan_range("row_start", "row_end",
cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
start: The start of the range when scanning by range.
end: (Optional.) The end of the range when scanning by range.
num_parallel_scans: (Optional.) The number of concurrent scans against the
Cloud Bigtable instance.
probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
A non-1 value indicates to probabilistically sample rows with the
provided probability.
columns: The columns to read. Note: most commonly, they are expressed as
kwargs. Use the columns value if you are using column families that are
reserved. The value of columns and kwargs are merged. Columns is a list
of tuples of strings ("column_family", "column_qualifier").
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
ds = _BigtableSampleKeyPairsDataset(self, "", start, end)
return self._make_parallel_scan_dataset(ds, num_parallel_scans, probability,
normalized)
def write(self, dataset, column_families, columns, timestamp=None):
"""Writes a dataset to the table.
Args:
dataset: A `tf.data.Dataset` to be written to this table. It must produce
a list of number-of-columns+1 elements, all of which must be strings.
The first value will be used as the row key, and subsequent values will
be used as cell values for the corresponding columns from the
corresponding column_families and columns entries.
column_families: A `tf.Tensor` of `tf.string`s corresponding to the
column names to store the dataset's elements into.
columns: A `tf.Tensor` of `tf.string`s corresponding to the column names
to store the dataset's elements into.
timestamp: (Optional.) An int64 timestamp to write all the values at.
Leave as None to use server-provided timestamps.
Returns:
A `tf.Operation` that can be run to perform the write.
Raises:
ValueError: If there are unexpected or incompatible types, or if the
number of columns and column_families does not match the output of
`dataset`.
"""
if timestamp is None:
timestamp = -1 # Bigtable server provided timestamp.
for tensor_type in nest.flatten(dataset.output_types):
if tensor_type != dtypes.string:
raise ValueError("Not all elements of the dataset were `tf.string`")
for shape in nest.flatten(dataset.output_shapes):
if not shape.is_compatible_with(tensor_shape.scalar()):
raise ValueError("Not all elements of the dataset were scalars")
if len(column_families) != len(columns):
raise ValueError("len(column_families) != len(columns)")
if len(nest.flatten(dataset.output_types)) != len(columns) + 1:
raise ValueError("A column name must be specified for every component of "
"the dataset elements. (e.g.: len(columns) != "
"len(dataset.output_types))")
return gen_bigtable_ops.dataset_to_bigtable(
self._resource,
dataset._as_variant_tensor(), # pylint: disable=protected-access
column_families,
columns,
timestamp)
def _make_parallel_scan_dataset(self, ds, num_parallel_scans,
normalized_probability, normalized_columns):
"""Builds a parallel dataset from a given range.
Args:
ds: A `_BigtableSampleKeyPairsDataset` returning ranges of keys to use.
num_parallel_scans: The number of concurrent parallel scans to use.
normalized_probability: A number between 0 and 1 for the keep probability.
normalized_columns: The column families and column qualifiers to retrieve.
Returns:
A `tf.data.Dataset` representing the result of the parallel scan.
"""
if num_parallel_scans is None:
num_parallel_scans = 50
ds = ds.shuffle(buffer_size=10000) # TODO(saeta): Make configurable.
def _interleave_fn(start, end):
return _BigtableScanDataset(
self,
prefix="",
start=start,
end=end,
normalized=normalized_columns,
probability=normalized_probability)
# Note prefetch_input_elements must be set in order to avoid rpc timeouts.
ds = ds.apply(
interleave_ops.parallel_interleave(
_interleave_fn,
cycle_length=num_parallel_scans,
sloppy=True,
prefetch_input_elements=1))
return ds
def _normalize_probability(probability):
if probability is None:
probability = 1.0
if isinstance(probability, float) and (probability <= 0.0 or
probability > 1.0):
raise ValueError("probability must be in the range (0, 1].")
return probability
def _normalize_columns(columns, provided_kwargs):
"""Converts arguments (columns, and kwargs dict) to C++ representation.
Args:
columns: a datastructure containing the column families and qualifier to
retrieve. Valid types include (1) None, (2) list of tuples, (3) a tuple of
strings.
provided_kwargs: a dictionary containing the column families and qualifiers
to retrieve
Returns:
A list of pairs of column family+qualifier to retrieve.
Raises:
ValueError: If there are no cells to retrieve or the columns are in an
incorrect format.
"""
normalized = columns
if normalized is None:
normalized = []
if isinstance(normalized, tuple):
if len(normalized) == 2:
normalized = [normalized]
else:
raise ValueError("columns was a tuple of inappropriate length")
for key, value in iteritems(provided_kwargs):
if key == "name":
continue
if isinstance(value, string_types):
normalized.append((key, value))
continue
for col in value:
normalized.append((key, col))
if not normalized:
raise ValueError("At least one column + column family must be specified.")
return normalized
class _BigtableKeyDataset(dataset_ops.Dataset):
"""_BigtableKeyDataset is an abstract class representing the keys of a table.
"""
def __init__(self, table):
"""Constructs a _BigtableKeyDataset.
Args:
table: a Bigtable class.
"""
super(_BigtableKeyDataset, self).__init__()
self._table = table
@property
def output_classes(self):
return ops.Tensor
@property
def output_shapes(self):
return tensor_shape.TensorShape([])
@property
def output_types(self):
return dtypes.string
class _BigtablePrefixKeyDataset(_BigtableKeyDataset):
"""_BigtablePrefixKeyDataset represents looking up keys by prefix.
"""
def __init__(self, table, prefix):
super(_BigtablePrefixKeyDataset, self).__init__(table)
self._prefix = prefix
def _as_variant_tensor(self):
return gen_bigtable_ops.bigtable_prefix_key_dataset(
table=self._table._resource, # pylint: disable=protected-access
prefix=self._prefix)
class _BigtableRangeKeyDataset(_BigtableKeyDataset):
"""_BigtableRangeKeyDataset represents looking up keys by range.
"""
def __init__(self, table, start, end):
super(_BigtableRangeKeyDataset, self).__init__(table)
self._start = start
self._end = end
def _as_variant_tensor(self):
return gen_bigtable_ops.bigtable_range_key_dataset(
table=self._table._resource, # pylint: disable=protected-access
start_key=self._start,
end_key=self._end)
class _BigtableSampleKeysDataset(_BigtableKeyDataset):
"""_BigtableSampleKeysDataset represents a sampling of row keys.
"""
# TODO(saeta): Expose the data size offsets into the keys.
def __init__(self, table):
super(_BigtableSampleKeysDataset, self).__init__(table)
def _as_variant_tensor(self):
return gen_bigtable_ops.bigtable_sample_keys_dataset(
table=self._table._resource) # pylint: disable=protected-access
class _BigtableLookupDataset(dataset_ops.Dataset):
"""_BigtableLookupDataset represents a dataset that retrieves values for keys.
"""
def __init__(self, dataset, table, normalized):
self._num_outputs = len(normalized) + 1 # 1 for row key
self._dataset = dataset
self._table = table
self._normalized = normalized
self._column_families = [i[0] for i in normalized]
self._columns = [i[1] for i in normalized]
@property
def output_classes(self):
return tuple([ops.Tensor] * self._num_outputs)
@property
def output_shapes(self):
return tuple([tensor_shape.TensorShape([])] * self._num_outputs)
@property
def output_types(self):
return tuple([dtypes.string] * self._num_outputs)
def _as_variant_tensor(self):
# pylint: disable=protected-access
return gen_bigtable_ops.bigtable_lookup_dataset(
keys_dataset=self._dataset._as_variant_tensor(),
table=self._table._resource,
column_families=self._column_families,
columns=self._columns)
class _BigtableScanDataset(dataset_ops.Dataset):
"""_BigtableScanDataset represents a dataset that retrieves keys and values.
"""
def __init__(self, table, prefix, start, end, normalized, probability):
self._table = table
self._prefix = prefix
self._start = start
self._end = end
self._column_families = [i[0] for i in normalized]
self._columns = [i[1] for i in normalized]
self._probability = probability
self._num_outputs = len(normalized) + 1 # 1 for row key
@property
def output_classes(self):
return tuple([ops.Tensor] * self._num_outputs)
@property
def output_shapes(self):
return tuple([tensor_shape.TensorShape([])] * self._num_outputs)
@property
def output_types(self):
return tuple([dtypes.string] * self._num_outputs)
def _as_variant_tensor(self):
return gen_bigtable_ops.bigtable_scan_dataset(
table=self._table._resource, # pylint: disable=protected-access
prefix=self._prefix,
start_key=self._start,
end_key=self._end,
column_families=self._column_families,
columns=self._columns,
probability=self._probability)
class _BigtableSampleKeyPairsDataset(dataset_ops.Dataset):
"""_BigtableSampleKeyPairsDataset returns key pairs from a Bigtable table.
"""
def __init__(self, table, prefix, start, end):
self._table = table
self._prefix = prefix
self._start = start
self._end = end
@property
def output_classes(self):
return (ops.Tensor, ops.Tensor)
@property
def output_shapes(self):
return (tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))
@property
def output_types(self):
return (dtypes.string, dtypes.string)
def _as_variant_tensor(self):
# pylint: disable=protected-access
return gen_bigtable_ops.bigtable_sample_key_pairs_dataset(
table=self._table._resource,
prefix=self._prefix,
start_key=self._start,
end_key=self._end)
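# Minimal usage sketch (not part of this module; the project, instance, table,
# and column names are placeholders, and parse_example is a hypothetical user
# function):
#
#   client = BigtableClient("my-project", "my-instance")
#   table = client.table("my-table")
#   keys = table.keys_by_prefix_dataset("train-")
#   rows = keys.apply(table.lookup_columns(("cf1", "image"), ("cf1", "label")))
#   rows = rows.map(parse_example, num_parallel_calls=8).batch(128)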
| apache-2.0 |
yanchen036/tensorflow | tensorflow/python/kernel_tests/check_ops_test.py | 2 | 53746 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.check_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.platform import test
class AssertProperIterableTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_single_tensor_raises(self):
tensor = constant_op.constant(1)
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(tensor)
@test_util.run_in_graph_and_eager_modes()
def test_single_sparse_tensor_raises(self):
ten = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(ten)
@test_util.run_in_graph_and_eager_modes()
def test_single_ndarray_raises(self):
array = np.array([1, 2, 3])
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(array)
@test_util.run_in_graph_and_eager_modes()
def test_single_string_raises(self):
mystr = "hello"
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(mystr)
@test_util.run_in_graph_and_eager_modes()
def test_non_iterable_object_raises(self):
non_iterable = 1234
with self.assertRaisesRegexp(TypeError, "to be iterable"):
check_ops.assert_proper_iterable(non_iterable)
@test_util.run_in_graph_and_eager_modes()
def test_list_does_not_raise(self):
list_of_stuff = [
constant_op.constant([11, 22]), constant_op.constant([1, 2])
]
check_ops.assert_proper_iterable(list_of_stuff)
@test_util.run_in_graph_and_eager_modes()
def test_generator_does_not_raise(self):
generator_of_stuff = (constant_op.constant([11, 22]), constant_op.constant(
[1, 2]))
check_ops.assert_proper_iterable(generator_of_stuff)
class AssertEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies([check_ops.assert_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_scalar_comparison(self):
const_true = constant_op.constant(True, name="true")
const_false = constant_op.constant(False, name="false")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(const_true, const_false, message="fail")
def test_returns_none_with_eager(self):
with context.eager_mode():
small = constant_op.constant([1, 2], name="small")
x = check_ops.assert_equal(small, small)
assert x is None
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_greater(self):
# Static check
static_small = constant_op.constant([1, 2], name="small")
static_big = constant_op.constant([3, 4], name="big")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(static_big, static_small, message="fail")
def test_raises_when_greater_dynamic(self):
with self.test_session():
small = array_ops.placeholder(dtypes.int32, name="small")
big = array_ops.placeholder(dtypes.int32, name="big")
with ops.control_dependencies(
[check_ops.assert_equal(big, small, message="fail")]):
out = array_ops.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval(feed_dict={small: [1, 2], big: [3, 4]})
def test_error_message_eager(self):
expected_error_msg_full = r"""big does not equal small
Condition x == y did not hold.
Indices of first 3 different values:
\[\[0 0\]
\[1 1\]
\[2 0\]\]
Corresponding x values:
\[2 3 6\]
Corresponding y values:
\[20 30 60\]
First 6 elements of x:
\[2 2 3 3 6 6\]
First 6 elements of y:
\[20 2 3 30 60 6\]
"""
expected_error_msg_default = r"""big does not equal small
Condition x == y did not hold.
Indices of first 3 different values:
\[\[0 0\]
\[1 1\]
\[2 0\]\]
Corresponding x values:
\[2 3 6\]
Corresponding y values:
\[20 30 60\]
First 3 elements of x:
\[2 2 3\]
First 3 elements of y:
\[20 2 3\]
"""
expected_error_msg_short = r"""big does not equal small
Condition x == y did not hold.
Indices of first 2 different values:
\[\[0 0\]
\[1 1\]\]
Corresponding x values:
\[2 3\]
Corresponding y values:
\[20 30\]
First 2 elements of x:
\[2 2\]
First 2 elements of y:
\[20 2\]
"""
with context.eager_mode():
big = constant_op.constant([[2, 2], [3, 3], [6, 6]])
small = constant_op.constant([[20, 2], [3, 30], [60, 6]])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_full):
check_ops.assert_equal(big, small, message="big does not equal small",
summarize=10)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_default):
check_ops.assert_equal(big, small, message="big does not equal small")
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_short):
check_ops.assert_equal(big, small, message="big does not equal small",
summarize=2)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less(self):
# Static check
static_small = constant_op.constant([3, 1], name="small")
static_big = constant_op.constant([4, 2], name="big")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(static_big, static_small, message="fail")
def test_raises_when_less_dynamic(self):
with self.test_session():
small = array_ops.placeholder(dtypes.int32, name="small")
big = array_ops.placeholder(dtypes.int32, name="big")
with ops.control_dependencies([check_ops.assert_equal(small, big)]):
out = array_ops.identity(small)
with self.assertRaisesOpError("small.*big"):
out.eval(feed_dict={small: [3, 1], big: [4, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_equal_and_broadcastable_shapes(self):
small = constant_op.constant([[1, 2], [1, 2]], name="small")
small_2 = constant_op.constant([1, 2], name="small_2")
with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
small_2 = constant_op.constant([1, 1], name="small_2")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[3\] vs. \[2\]|"
r"Dimensions must be equal, but are 3 and 2")):
with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_not_equal_and_broadcastable_shapes(self):
cond = constant_op.constant([True, False], name="small")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(cond, False, message="fail")
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertNoneEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_not_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([10, 20], name="small")
with ops.control_dependencies(
[check_ops.assert_none_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_equal(self):
small = constant_op.constant([3, 1], name="small")
with self.assertRaisesOpError("x != y did not hold"):
with ops.control_dependencies(
[check_ops.assert_none_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_not_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3], name="big")
with ops.control_dependencies(
[check_ops.assert_none_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_not_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([10, 10], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
(r"Incompatible shapes: \[3\] vs. \[2\]|"
r"Dimensions must be equal, but are 3 and 2")):
with ops.control_dependencies(
[check_ops.assert_none_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_none_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
def test_returns_none_with_eager(self):
with context.eager_mode():
t1 = constant_op.constant([1, 2])
t2 = constant_op.constant([3, 4])
x = check_ops.assert_none_equal(t1, t2)
assert x is None
class AssertAllCloseTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_equal(self):
x = constant_op.constant(1., name="x")
y = constant_op.constant(1., name="y")
with ops.control_dependencies(
[check_ops.assert_near(x, y, message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_32_bit_due_to_default_rtol(self):
eps = np.finfo(np.float32).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(1., name="x")
y = constant_op.constant(1. + 2 * eps, name="y", dtype=np.float32)
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_32_bit_due_to_default_atol(self):
eps = np.finfo(np.float32).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(0., name="x")
y = constant_op.constant(0. + 2 * eps, name="y", dtype=np.float32)
with ops.control_dependencies(
[check_ops.assert_near(x, y, rtol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_64_bit_due_to_default_rtol(self):
eps = np.finfo(np.float64).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(1., name="x", dtype=np.float64)
y = constant_op.constant(1. + 2 * eps, name="y", dtype=np.float64)
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_64_bit_due_to_default_atol(self):
eps = np.finfo(np.float64).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(0., name="x", dtype=np.float64)
y = constant_op.constant(0. + 2 * eps, name="y", dtype=np.float64)
with ops.control_dependencies(
[check_ops.assert_near(x, y, rtol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_due_to_custom_rtol(self):
x = constant_op.constant(1., name="x")
y = constant_op.constant(1.1, name="y")
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0., rtol=0.5,
message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_due_to_custom_atol(self):
x = constant_op.constant(0., name="x")
y = constant_op.constant(0.1, name="y", dtype=np.float32)
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0.5, rtol=0.,
message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_near(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_atol_violated(self):
x = constant_op.constant(10., name="x")
y = constant_op.constant(10.2, name="y")
with self.assertRaisesOpError("x and y not equal to tolerance"):
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0.1,
message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_default_rtol_violated(self):
x = constant_op.constant(0.1, name="x")
y = constant_op.constant(0.0, name="y")
with self.assertRaisesOpError("x and y not equal to tolerance"):
with ops.control_dependencies(
[check_ops.assert_near(x, y, message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
def test_returns_none_with_eager(self):
with context.eager_mode():
t1 = constant_op.constant([1., 2.])
t2 = constant_op.constant([1., 2.])
x = check_ops.assert_near(t1, t2)
assert x is None
class AssertLessTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with self.assertRaisesOpError("failure message.*\n*.* x < y did not hold"):
with ops.control_dependencies(
[check_ops.assert_less(
small, small, message="failure message")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_greater(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("x < y did not hold"):
with ops.control_dependencies([check_ops.assert_less(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_less(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([4, 2], name="big")
with ops.control_dependencies([check_ops.assert_less(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies([check_ops.assert_less(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([3, 2], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
(r"Incompatible shapes: \[3\] vs. \[2\]|"
"Dimensions must be equal, but are 3 and 2")):
with ops.control_dependencies([check_ops.assert_less(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_less(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
def test_returns_none_with_eager(self):
with context.eager_mode():
t1 = constant_op.constant([1, 2])
t2 = constant_op.constant([3, 4])
x = check_ops.assert_less(t1, t2)
assert x is None
class AssertLessEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies(
[check_ops.assert_less_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_greater(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_less_equal(
big, small, message="fail")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_less_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies([check_ops.assert_less_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 1], name="big")
with ops.control_dependencies([check_ops.assert_less_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([1, 1, 1], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[2\] vs. \[3\]|"
r"Dimensions must be equal, but are 2 and 3")):
with ops.control_dependencies(
[check_ops.assert_less_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_less_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertGreaterTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_greater(
small, small, message="fail")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("x > y did not hold"):
with ops.control_dependencies([check_ops.assert_greater(small, big)]):
out = array_ops.identity(big)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_greater(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([4, 2], name="big")
with ops.control_dependencies([check_ops.assert_greater(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_greater_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies([check_ops.assert_greater(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_greater_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([3, 2], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[2\] vs. \[3\]|"
r"Dimensions must be equal, but are 2 and 3")):
with ops.control_dependencies([check_ops.assert_greater(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_greater(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertGreaterEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies(
[check_ops.assert_greater_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_greater_equal(
small, big, message="fail")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_greater_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 1], name="big")
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="big")
big = constant_op.constant([3, 1], name="small")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[2\] vs. \[3\]|"
r"Dimensions must be equal, but are 2 and 3")):
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_greater_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertNegativeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_negative(self):
frank = constant_op.constant([-1, -2], name="frank")
with ops.control_dependencies([check_ops.assert_negative(frank)]):
out = array_ops.identity(frank)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_positive(self):
doug = constant_op.constant([1, 2], name="doug")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_negative(
doug, message="fail")]):
out = array_ops.identity(doug)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_zero(self):
claire = constant_op.constant([0], name="claire")
with self.assertRaisesOpError("x < 0 did not hold"):
with ops.control_dependencies([check_ops.assert_negative(claire)]):
out = array_ops.identity(claire)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_doesnt_raise(self):
# A tensor is negative when it satisfies:
# For every element x_i in x, x_i < 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_negative(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertPositiveTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_negative(self):
freddie = constant_op.constant([-1, -2], name="freddie")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_positive(
freddie, message="fail")]):
out = array_ops.identity(freddie)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_positive(self):
remmy = constant_op.constant([1, 2], name="remmy")
with ops.control_dependencies([check_ops.assert_positive(remmy)]):
out = array_ops.identity(remmy)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_zero(self):
meechum = constant_op.constant([0], name="meechum")
with self.assertRaisesOpError("x > 0 did not hold"):
with ops.control_dependencies([check_ops.assert_positive(meechum)]):
out = array_ops.identity(meechum)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_doesnt_raise(self):
# A tensor is positive when it satisfies:
# For every element x_i in x, x_i > 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_positive(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertRankTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError,
"fail.*must have rank 1"):
with ops.control_dependencies(
[check_ops.assert_rank(
tensor, desired_rank, message="fail")]):
self.evaluate(array_ops.identity(tensor))
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank(
tensor, desired_rank, message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_raises_if_rank_too_large_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 0
with self.assertRaisesRegexp(ValueError, "rank"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_raises_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "rank"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 2
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_raises_if_rank_is_not_scalar_static(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
check_ops.assert_rank(tensor, np.array([], dtype=np.int32))
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.test_session():
tensor = constant_op.constant(
[1, 2], dtype=dtypes.float32, name="my_tensor")
rank_tensor = array_ops.placeholder(dtypes.int32, name="rank_tensor")
with self.assertRaisesOpError("Rank must be a scalar"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, rank_tensor)]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_raises_if_rank_is_not_integer_static(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
check_ops.assert_rank(tensor, .5)
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.test_session():
tensor = constant_op.constant(
[1, 2], dtype=dtypes.float32, name="my_tensor")
rank_tensor = array_ops.placeholder(dtypes.float32, name="rank_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, rank_tensor)]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: .5})
class AssertRankInTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_raises_if_rank_mismatch_static_rank(self):
tensor_rank0 = constant_op.constant(42, name="my_tensor")
with self.assertRaisesRegexp(
ValueError, "fail.*must have rank.*in.*1.*2"):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, (1, 2), message="fail")]):
self.evaluate(array_ops.identity(tensor_rank0))
def test_rank_zero_tensor_raises_if_rank_mismatch_dynamic_rank(self):
with self.test_session():
tensor_rank0 = array_ops.placeholder(dtypes.float32, name="my_tensor")
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, (1, 2), message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_doesnt_raise_if_rank_matches_static_rank(self):
tensor_rank0 = constant_op.constant(42, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, desired_ranks)]):
self.evaluate(array_ops.identity(tensor_rank0))
def test_rank_zero_tensor_doesnt_raise_if_rank_matches_dynamic_rank(self):
with self.test_session():
tensor_rank0 = array_ops.placeholder(dtypes.float32, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, desired_ranks)]):
array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_doesnt_raise_if_rank_matches_static_rank(self):
tensor_rank1 = constant_op.constant([42, 43], name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, desired_ranks)]):
self.evaluate(array_ops.identity(tensor_rank1))
def test_rank_one_tensor_doesnt_raise_if_rank_matches_dynamic_rank(self):
with self.test_session():
tensor_rank1 = array_ops.placeholder(dtypes.float32, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, desired_ranks)]):
array_ops.identity(tensor_rank1).eval(feed_dict={
tensor_rank1: (42.0, 43.0)
})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_raises_if_rank_mismatches_static_rank(self):
tensor_rank1 = constant_op.constant((42, 43), name="my_tensor")
with self.assertRaisesRegexp(ValueError, "rank"):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, (0, 2))]):
self.evaluate(array_ops.identity(tensor_rank1))
def test_rank_one_tensor_raises_if_rank_mismatches_dynamic_rank(self):
with self.test_session():
tensor_rank1 = array_ops.placeholder(dtypes.float32, name="my_tensor")
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, (0, 2))]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor_rank1).eval(feed_dict={
tensor_rank1: (42.0, 43.0)
})
@test_util.run_in_graph_and_eager_modes()
def test_raises_if_rank_is_not_scalar_static(self):
tensor = constant_op.constant((42, 43), name="my_tensor")
desired_ranks = (
np.array(1, dtype=np.int32),
np.array((2, 1), dtype=np.int32))
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
check_ops.assert_rank_in(tensor, desired_ranks)
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.test_session():
tensor = constant_op.constant(
(42, 43), dtype=dtypes.float32, name="my_tensor")
desired_ranks = (
array_ops.placeholder(dtypes.int32, name="rank0_tensor"),
array_ops.placeholder(dtypes.int32, name="rank1_tensor"))
with self.assertRaisesOpError("Rank must be a scalar"):
with ops.control_dependencies(
(check_ops.assert_rank_in(tensor, desired_ranks),)):
array_ops.identity(tensor).eval(feed_dict={
desired_ranks[0]: 1,
desired_ranks[1]: [2, 1],
})
@test_util.run_in_graph_and_eager_modes()
def test_raises_if_rank_is_not_integer_static(self):
tensor = constant_op.constant((42, 43), name="my_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
check_ops.assert_rank_in(tensor, (1, .5,))
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.test_session():
tensor = constant_op.constant(
(42, 43), dtype=dtypes.float32, name="my_tensor")
rank_tensor = array_ops.placeholder(dtypes.float32, name="rank_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
with ops.control_dependencies(
[check_ops.assert_rank_in(tensor, (1, rank_tensor))]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: .5})
class AssertRankAtLeastTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError, "rank at least 1"):
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
  def test_rank_one_tensor_doesnt_raise_if_rank_too_large_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
  def test_rank_one_tensor_doesnt_raise_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "rank at least 2"):
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 2
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
class AssertNonNegativeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_negative(self):
zoe = constant_op.constant([-1, -2], name="zoe")
with self.assertRaisesOpError("x >= 0 did not hold"):
with ops.control_dependencies([check_ops.assert_non_negative(zoe)]):
out = array_ops.identity(zoe)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_zero_and_positive(self):
lucas = constant_op.constant([0, 2], name="lucas")
with ops.control_dependencies([check_ops.assert_non_negative(lucas)]):
out = array_ops.identity(lucas)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-negative when it satisfies:
# For every element x_i in x, x_i >= 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_non_negative(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertNonPositiveTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_zero_and_negative(self):
tom = constant_op.constant([0, -2], name="tom")
with ops.control_dependencies([check_ops.assert_non_positive(tom)]):
out = array_ops.identity(tom)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_positive(self):
rachel = constant_op.constant([0, 2], name="rachel")
with self.assertRaisesOpError("x <= 0 did not hold"):
with ops.control_dependencies([check_ops.assert_non_positive(rachel)]):
out = array_ops.identity(rachel)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-positive when it satisfies:
# For every element x_i in x, x_i <= 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_non_positive(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertIntegerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_integer(self):
integers = constant_op.constant([1, 2], name="integers")
with ops.control_dependencies([check_ops.assert_integer(integers)]):
out = array_ops.identity(integers)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_float(self):
floats = constant_op.constant([1.0, 2.0], name="floats")
with self.assertRaisesRegexp(TypeError, "Expected.*integer"):
check_ops.assert_integer(floats)
class AssertTypeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_correct_type(self):
integers = constant_op.constant([1, 2], dtype=dtypes.int64)
with ops.control_dependencies([
check_ops.assert_type(integers, dtypes.int64)]):
out = array_ops.identity(integers)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_wrong_type(self):
floats = constant_op.constant([1.0, 2.0], dtype=dtypes.float16)
with self.assertRaisesRegexp(TypeError, "must be of type.*float32"):
check_ops.assert_type(floats, dtypes.float32)
class IsStrictlyIncreasingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_constant_tensor_is_not_strictly_increasing(self):
self.assertFalse(self.evaluate(check_ops.is_strictly_increasing([1, 1, 1])))
@test_util.run_in_graph_and_eager_modes()
def test_decreasing_tensor_is_not_strictly_increasing(self):
self.assertFalse(self.evaluate(
check_ops.is_strictly_increasing([1, 0, -1])))
@test_util.run_in_graph_and_eager_modes()
def test_2d_decreasing_tensor_is_not_strictly_increasing(self):
self.assertFalse(
self.evaluate(check_ops.is_strictly_increasing([[1, 3], [2, 4]])))
@test_util.run_in_graph_and_eager_modes()
def test_increasing_tensor_is_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([1, 2, 3])))
@test_util.run_in_graph_and_eager_modes()
def test_increasing_rank_two_tensor(self):
self.assertTrue(
self.evaluate(check_ops.is_strictly_increasing([[-1, 2], [3, 4]])))
@test_util.run_in_graph_and_eager_modes()
def test_tensor_with_one_element_is_strictly_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([1])))
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_is_strictly_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([])))
class IsNonDecreasingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_constant_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1, 1, 1])))
@test_util.run_in_graph_and_eager_modes()
def test_decreasing_tensor_is_not_non_decreasing(self):
self.assertFalse(self.evaluate(check_ops.is_non_decreasing([3, 2, 1])))
@test_util.run_in_graph_and_eager_modes()
def test_2d_decreasing_tensor_is_not_non_decreasing(self):
self.assertFalse(self.evaluate(
check_ops.is_non_decreasing([[1, 3], [2, 4]])))
@test_util.run_in_graph_and_eager_modes()
def test_increasing_rank_one_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1, 2, 3])))
@test_util.run_in_graph_and_eager_modes()
def test_increasing_rank_two_tensor(self):
self.assertTrue(self.evaluate(
check_ops.is_non_decreasing([[-1, 2], [3, 3]])))
@test_util.run_in_graph_and_eager_modes()
def test_tensor_with_one_element_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1])))
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([])))
class FloatDTypeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_assert_same_float_dtype(self):
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(None, None))
self.assertIs(dtypes.float32, check_ops.assert_same_float_dtype([], None))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype([], dtypes.float32))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(None, dtypes.float32))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype([None, None], None))
self.assertIs(
dtypes.float32,
check_ops.assert_same_float_dtype([None, None], dtypes.float32))
const_float = constant_op.constant(3.0, dtype=dtypes.float32)
self.assertIs(
dtypes.float32,
check_ops.assert_same_float_dtype([const_float], dtypes.float32))
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[const_float], dtypes.int32)
sparse_float = sparse_tensor.SparseTensor(
constant_op.constant([[111], [232]], dtypes.int64),
constant_op.constant([23.4, -43.2], dtypes.float32),
constant_op.constant([500], dtypes.int64))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype([sparse_float],
dtypes.float32))
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float], dtypes.int32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[const_float, None, sparse_float], dtypes.float64)
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(
[const_float, sparse_float]))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(
[const_float, sparse_float], dtypes.float32))
const_int = constant_op.constant(3, dtype=dtypes.int32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float, const_int])
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float, const_int], dtypes.int32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float, const_int], dtypes.float32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[const_int])
class AssertScalarTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_assert_scalar(self):
check_ops.assert_scalar(constant_op.constant(3))
check_ops.assert_scalar(constant_op.constant("foo"))
check_ops.assert_scalar(3)
check_ops.assert_scalar("foo")
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
check_ops.assert_scalar(constant_op.constant([3, 4]))
if __name__ == "__main__":
test.main()
| apache-2.0 |
andalexo/bgv | bgvDataDisp/guiqt.py | 1 | 8538 | """
The PyQt4/PyQt5 GUI classes for the bgvdata package.
"""
from __future__ import print_function, division
import logging
from os.path import splitext
try:
from PyQt4.QtGui import QMainWindow, QDockWidget
from PyQt4 import QtCore
except ImportError:
from PyQt5.QtWidgets import QMainWindow, QDockWidget
from PyQt5 import QtCore
from pyqttoolbox.auxwidgets import StatusBar, ToolBar, MenuBar, PlayStop
from pyqttoolbox.auxwidgets import TabbedWidgets, TreeFileExplorer
from pyqttoolbox.pgplotting_old import BufferProvider
from pyqttoolbox.pgplotting import ScrollArea, PgDockArea
from pyqttoolbox.threadtools import Worker
from wpap_bgv import BgvDataFileCtrl, TriggerDataCtrl
# CONSTANTS
BGV_LOGDB_DICT = {'BEAM ENERGY': ('HX:ENG',),
'BEAM INTENSITY': ('LHC.BCTDC.A6R4.B1:BEAM_INTENSITY',
'LHC.BCTDC.A6R4.B2:BEAM_INTENSITY'),
'BUNCH INTENSITY': ('LHC.BCTFR.A6R4.B1:BUNCH_INTENSITY',
'LHC.BCTFR.A6R4.B2:BUNCH_INTENSITY'),
'FILLED BUCKETS': ('LHC.BQM.B1:FILLED_BUCKETS',
'LHC.BQM.B2:FILLED_BUCKETS'),
'VACUUM PRESSURE': ('VGI.439.7L4.R.PR', 'VGI.483.7L4.B2.PR',
                                      'VGI.147.7L4.R.PR', 'VGI.147.7L4.B.PR',
'VGI.141.6L4.B.PR', 'VGI.163.6L4.R.PR'),
'RADMON - TID1': ('SIMA.7L4.4LM19S:TID1_INT',),
'RADMON - SEU': ('SIMA.7L4.4LM19S:SEU_COUNTS_INT', ),
'BSRT SIGMA': ('LHC.BSRT.5L4.B2:FIT_SIGMA_H',
'LHC.BSRT.5L4.B2:FIT_SIGMA_V'),
'BGV TEMPS': ('BGVDA.A7L4.B2:NOVASINA_DEWP_MANIFOLD',
'BGVDA.A7L4.B2:NOVASINA_DEWP_TENT',
'BGVDA.A7L4.B2:NOVASINA_TEMP_MANIFOLD',
'BGVDA.A7L4.B2:NOVASINA_TEMP_TENT',
'BGVDA.A7L4.B2:TEMP_CHM_CONE',
'BGVDA.A7L4.B2:TEMP_CHM_WIN_AL',
'BGVDA.A7L4.B2:TEMP_CHM_WIN_ST',
'BGVDA.A7L4.B2:TEMP_DET_FTI')
}
ISO_FMT = '%Y-%m-%d %H:%M:%S'
logger = logging.getLogger(__name__)
class BGVDataGUI(QMainWindow):
"""
Description.
"""
def __init__(self, **kwargs):
super(BGVDataGUI, self).__init__()
self.setWindowTitle('BGV Event Display')
#######################################################################
# Lists for filling widgets
#######################################################################
tab_dict = {} # For the tab widget {title: widget}
tb_widgets_list = [] # For the Toolbar
tb_actions_list = []
#######################################################################
# Basic Widgets - Status Bar, Menu Bar, Scroll Area, File Explorer
#######################################################################
self.status = StatusBar(reset_str='Ready')
menu = MenuBar()
self.toolbar = ToolBar()
self.sa = ScrollArea(drops=False)
self.da = PgDockArea()
self.sa.setWidget(self.da)
self.tfe = TreeFileExplorer()
# self.tfe.setMinimumWidth(500)
self.tfe.openFile.connect(self.data_file_open)
self.d_tree = QDockWidget('File Explorer')
self.d_tree.setWidget(self.tfe)
values = [v*0.5 for v in range(5)]
self.playstop = PlayStop(delay=True, values=values)
# TODO: check where to connect the signals
tb_widgets_list.append(self.playstop)
tb_widgets_list.append('s')
#######################################################################
# Providers & Algorithms
#######################################################################
# Setting the providers to the event box
self.bgv_ctrl = BgvDataFileCtrl(dock_area=self.da,
flow_ctrl=self.playstop,
tfe=self.tfe)
tab_dict['Evnt Ctrl'] = self.bgv_ctrl
self.trg_ctrl = TriggerDataCtrl(dock_area=self.da)
tab_dict['Trg Ctrl'] = self.trg_ctrl
# Setting the Logging Database
self.logdb_wdt = None
try:
from wpap_logdb import LogDBWidget
self.logdb_wdt = LogDBWidget(def_vars=BGV_LOGDB_DICT,
editable=True, dock_area=self.da)
self.d_logdb_wdt = QDockWidget('LogDB')
self.d_logdb_wdt.setWidget(self.logdb_wdt)
self.logdb_wdt.clicked_add() # just init it with a dummy var
except (ImportError, TypeError) as e:
logger.warning('LogDB controller could not be loaded.\n\t%s' % e)
self.logdb_wdt = None
#######################################################################
# Setting the tabs
#######################################################################
self.tabs = TabbedWidgets(tab_dict.values(),
tab_titles=tab_dict.keys())
self.d_tabs = QDockWidget('Control Tabs')
self.d_tabs.setWidget(self.tabs)
# Setting the ToolBar
tb_actions_list.append(self.d_tabs.toggleViewAction())
tb_actions_list.append(self.d_tree.toggleViewAction())
if self.logdb_wdt is not None:
tb_actions_list.append(self.d_logdb_wdt.toggleViewAction())
self.toolbar = ToolBar(actions=tb_actions_list,
widgets=tb_widgets_list, seq='wa')
# Layout
self.setStatusBar(self.status)
self.setMenuBar(menu)
self.addToolBar(QtCore.Qt.TopToolBarArea, self.toolbar)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.d_tree)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea, self.d_tabs)
if self.logdb_wdt is not None:
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.d_logdb_wdt)
self.d_logdb_wdt.close() # keep it closed at init
self.setCentralWidget(self.sa)
##################################################
# TreeFileExplorer - Open File #
##################################################
def data_file_open(self):
ext = splitext(self.tfe.path)[1]
logger.debug('data_file_open: %s / ext:%s' % (self.tfe.path, ext))
self.status.showMessageDelay('File set: %s' % self.tfe.path)
if ext == '.csv':
thread = Worker(target=self.algo_seq_csv)
thread.start()
elif ext == '.h5':
thread = Worker(target=self.bgv_ctrl.load_algo)
thread.start()
elif ext == '.tdat':
logger.debug('Trigger data file to open')
thread = Worker(target=self.trg_ctrl.trg_retriever.execute,
args=(self.tfe.path,))
thread.start()
elif ext == '.bdat':
logger.debug('Bunch data file to open')
thread = Worker(target=self.trg_ctrl.bch_retriever.execute,
args=(self.tfe.path,))
thread.start()
elif ext == '.rdat':
logger.debug('RadMon file to open')
elif ext == '.mdf':
logger.debug('MDF file to open')
else:
logger.warning('Unrecognized file extension: [%s]' % ext)
##################################################
# Checking prerequisites #
##################################################
    # TODO: To be removed - this is better placed inside the providers class.
    # Pass prerequisites as a keyword argument; when a provider is enabled,
    # enable its prerequisites as well.
def data_prov_change(self, label, index, state):
logger.debug('data_prov_change: lbl:%s, indx:%d, state:%s'
% (label, index, state))
label = str(label)
if state:
bp = BufferProvider(self.da, title=label)
self.data_providers[label] = bp
self.data_providers[label].setChecked(state)
else:
try:
self.data_providers[label].setChecked(state)
del self.data_providers[label]
except KeyError:
logger.warning('KeyError should not happen')
logger.debug('self.data_providers: %s' % self.data_providers)
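# Illustrative sketch (not part of the original module): the window is expected
# to be started through a standard Qt event loop, for example:
#
#   from PyQt4.QtGui import QApplication  # or: from PyQt5.QtWidgets import QApplication
#   app = QApplication([])
#   gui = BGVDataGUI()
#   gui.show()
#   app.exec_()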
| mit |
huongttlan/statsmodels | statsmodels/stats/tests/test_inter_rater.py | 34 | 11513 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 09:18:14 2012
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from statsmodels.stats.inter_rater import (fleiss_kappa, cohens_kappa,
to_table, aggregate_raters)
class Holder(object):
pass
table0 = np.asarray('''\
1 0 0 0 0 14 1.000
2 0 2 6 4 2 0.253
3 0 0 3 5 6 0.308
4 0 3 9 2 0 0.440
5 2 2 8 1 1 0.330
6 7 7 0 0 0 0.462
7 3 2 6 3 0 0.242
8 2 5 3 2 2 0.176
9 6 5 2 1 0 0.286
10 0 2 2 3 7 0.286'''.split(), float).reshape(10,-1)
table1 = table0[:, 1:-1]
table10 = [[0, 4, 1],
[0, 8, 0],
[0, 1, 5]]
#Fleiss 1971, Fleiss has only the transformed table
diagnoses = np.array( [[4, 4, 4, 4, 4, 4],
[2, 2, 2, 5, 5, 5],
[2, 3, 3, 3, 3, 5],
[5, 5, 5, 5, 5, 5],
[2, 2, 2, 4, 4, 4],
[1, 1, 3, 3, 3, 3],
[3, 3, 3, 3, 5, 5],
[1, 1, 3, 3, 3, 4],
[1, 1, 4, 4, 4, 4],
[5, 5, 5, 5, 5, 5],
[1, 4, 4, 4, 4, 4],
[1, 2, 4, 4, 4, 4],
[2, 2, 2, 3, 3, 3],
[1, 4, 4, 4, 4, 4],
[2, 2, 4, 4, 4, 5],
[3, 3, 3, 3, 3, 5],
[1, 1, 1, 4, 5, 5],
[1, 1, 1, 1, 1, 2],
[2, 2, 4, 4, 4, 4],
[1, 3, 3, 5, 5, 5],
[5, 5, 5, 5, 5, 5],
[2, 4, 4, 4, 4, 4],
[2, 2, 4, 5, 5, 5],
[1, 1, 4, 4, 4, 4],
[1, 4, 4, 4, 4, 5],
[2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 5, 5],
[2, 2, 4, 4, 4, 4],
[1, 3, 3, 3, 3, 3],
[5, 5, 5, 5, 5, 5]])
diagnoses_rownames = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', ]
diagnoses_colnames = ['rater1', 'rater2', 'rater3', 'rater4', 'rater5', 'rater6', ]
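# Illustrative sketch (not part of the original test module): `diagnoses` holds
# one category label per (subject, rater); `aggregate_raters` converts it into
# the (n_subjects, n_categories) count table that `fleiss_kappa` expects, e.g.
#
#   table, categories = aggregate_raters(diagnoses)
#   # categories -> array([1, 2, 3, 4, 5]); table[0] -> [0, 0, 0, 6, 0]
#   fleiss_kappa(table)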
def test_fleiss_kappa():
#currently only example from Wikipedia page
kappa_wp = 0.210
assert_almost_equal(fleiss_kappa(table1), kappa_wp, decimal=3)
class CheckCohens(object):
def test_results(self):
res = self.res
res2 = self.res2
res_ = [res.kappa, res.std_kappa, res.kappa_low, res.kappa_upp, res.std_kappa0,
res.z_value, res.pvalue_one_sided, res.pvalue_two_sided]
assert_almost_equal(res_, res2, decimal=4)
assert_equal(str(res), self.res_string)
class TestUnweightedCohens(CheckCohens):
#comparison to printout of a SAS example
def __init__(self):
#temporary: res instance is at last position
self.res = cohens_kappa(table10)
res10_sas = [0.4842, 0.1380, 0.2137, 0.7547]
res10_sash0 = [0.1484, 3.2626, 0.0006, 0.0011] #for test H0:kappa=0
self.res2 = res10_sas + res10_sash0 #concatenate
self.res_string = '''\
Simple Kappa Coefficient
--------------------------------
Kappa 0.4842
ASE 0.1380
95% Lower Conf Limit 0.2137
95% Upper Conf Limit 0.7547
Test of H0: Simple Kappa = 0
ASE under H0 0.1484
Z 3.2626
One-sided Pr > Z 0.0006
Two-sided Pr > |Z| 0.0011''' + '\n'
def test_option(self):
kappa = cohens_kappa(table10, return_results=False)
assert_almost_equal(kappa, self.res2[0], decimal=4)
class TestWeightedCohens(CheckCohens):
#comparison to printout of a SAS example
def __init__(self):
#temporary: res instance is at last position
self.res = cohens_kappa(table10, weights=[0, 1, 2])
res10w_sas = [0.4701, 0.1457, 0.1845, 0.7558]
res10w_sash0 = [0.1426, 3.2971, 0.0005, 0.0010] #for test H0:kappa=0
self.res2 = res10w_sas + res10w_sash0 #concatenate
self.res_string = '''\
Weighted Kappa Coefficient
--------------------------------
Kappa 0.4701
ASE 0.1457
95% Lower Conf Limit 0.1845
95% Upper Conf Limit 0.7558
Test of H0: Weighted Kappa = 0
ASE under H0 0.1426
Z 3.2971
One-sided Pr > Z 0.0005
Two-sided Pr > |Z| 0.0010''' + '\n'
def test_option(self):
kappa = cohens_kappa(table10, weights=[0, 1, 2], return_results=False)
assert_almost_equal(kappa, self.res2[0], decimal=4)
def test_cohenskappa_weights():
#some tests for equivalent results with different options
np.random.seed(9743678)
table = np.random.randint(0, 10, size=(5,5)) + 5*np.eye(5)
#example aggregation, 2 groups of levels
mat = np.array([[1,1,1, 0,0],[0,0,0,1,1]])
table_agg = np.dot(np.dot(mat, table), mat.T)
res1 = cohens_kappa(table, weights=np.arange(5) > 2, wt='linear')
res2 = cohens_kappa(table_agg, weights=np.arange(2), wt='linear')
assert_almost_equal(res1.kappa, res2.kappa, decimal=14)
assert_almost_equal(res1.var_kappa, res2.var_kappa, decimal=14)
#equivalence toeplitz with linear for special cases
res1 = cohens_kappa(table, weights=2*np.arange(5), wt='linear')
res2 = cohens_kappa(table, weights=2*np.arange(5), wt='toeplitz')
res3 = cohens_kappa(table, weights=res1.weights[0], wt='toeplitz')
#2-Dim weights
res4 = cohens_kappa(table, weights=res1.weights)
assert_almost_equal(res1.kappa, res2.kappa, decimal=14)
assert_almost_equal(res1.var_kappa, res2.var_kappa, decimal=14)
assert_almost_equal(res1.kappa, res3.kappa, decimal=14)
assert_almost_equal(res1.var_kappa, res3.var_kappa, decimal=14)
assert_almost_equal(res1.kappa, res4.kappa, decimal=14)
assert_almost_equal(res1.var_kappa, res4.var_kappa, decimal=14)
#equivalence toeplitz with quadratic for special cases
res1 = cohens_kappa(table, weights=5*np.arange(5)**2, wt='toeplitz')
res2 = cohens_kappa(table, weights=5*np.arange(5), wt='quadratic')
assert_almost_equal(res1.kappa, res2.kappa, decimal=14)
assert_almost_equal(res1.var_kappa, res2.var_kappa, decimal=14)
anxiety = np.array([
3, 3, 3, 4, 5, 5, 2, 3, 5, 2, 2, 6, 1, 5, 2, 2, 1, 2, 4, 3, 3, 6, 4,
6, 2, 4, 2, 4, 3, 3, 2, 3, 3, 3, 2, 2, 1, 3, 3, 4, 2, 1, 4, 4, 3, 2,
1, 6, 1, 1, 1, 2, 3, 3, 1, 1, 3, 3, 2, 2
]).reshape(20,3, order='F')
anxiety_rownames = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', ]
anxiety_colnames = ['rater1', 'rater2', 'rater3', ]
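# anxiety is the 20-subject, 3-rater example data (ratings on a 1-6 scale) shipped with
# the R irr package; the expected values below were generated with irr::kappa2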
def test_cohens_kappa_irr():
ck_w3 = Holder()
ck_w4 = Holder()
#>r = kappa2(anxiety[,1:2], c(0,0,0,1,1,1))
#> cat_items(r, pref="ck_w3.")
ck_w3.method = "Cohen's Kappa for 2 Raters (Weights: 0,0,0,1,1,1)"
ck_w3.irr_name = 'Kappa'
ck_w3.value = 0.1891892
ck_w3.stat_name = 'z'
ck_w3.statistic = 0.5079002
ck_w3.p_value = 0.6115233
#> r = kappa2(anxiety[,1:2], c(0,0,1,1,2,2))
#> cat_items(r, pref="ck_w4.")
ck_w4.method = "Cohen's Kappa for 2 Raters (Weights: 0,0,1,1,2,2)"
ck_w4.irr_name = 'Kappa'
ck_w4.value = 0.2820513
ck_w4.stat_name = 'z'
ck_w4.statistic = 1.257410
ck_w4.p_value = 0.2086053
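# NOTE: the two Holder results above (custom 6-level weights for raters 1-2) are
# replaced by the fresh instances below and never appear in all_cases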
ck_w1 = Holder()
ck_w2 = Holder()
ck_w3 = Holder()
ck_w4 = Holder()
#> r = kappa2(anxiety[,2:3])
#> cat_items(r, pref="ck_w1.")
ck_w1.method = "Cohen's Kappa for 2 Raters (Weights: unweighted)"
ck_w1.irr_name = 'Kappa'
ck_w1.value = -0.006289308
ck_w1.stat_name = 'z'
ck_w1.statistic = -0.0604067
ck_w1.p_value = 0.9518317
#> r = kappa2(anxiety[,2:3], "equal")
#> cat_items(r, pref="ck_w2.")
ck_w2.method = "Cohen's Kappa for 2 Raters (Weights: equal)"
ck_w2.irr_name = 'Kappa'
ck_w2.value = 0.1459075
ck_w2.stat_name = 'z'
ck_w2.statistic = 1.282472
ck_w2.p_value = 0.1996772
#> r = kappa2(anxiety[,2:3], "squared")
#> cat_items(r, pref="ck_w3.")
ck_w3.method = "Cohen's Kappa for 2 Raters (Weights: squared)"
ck_w3.irr_name = 'Kappa'
ck_w3.value = 0.2520325
ck_w3.stat_name = 'z'
ck_w3.statistic = 1.437451
ck_w3.p_value = 0.1505898
#> r = kappa2(anxiety[,2:3], c(0,0,1,1,2))
#> cat_items(r, pref="ck_w4.")
ck_w4.method = "Cohen's Kappa for 2 Raters (Weights: 0,0,1,1,2)"
ck_w4.irr_name = 'Kappa'
ck_w4.value = 0.2391304
ck_w4.stat_name = 'z'
ck_w4.statistic = 1.223734
ck_w4.p_value = 0.2210526
all_cases = [(ck_w1, None, None),
(ck_w2, None, 'linear'),
(ck_w2, np.arange(5), None),
(ck_w2, np.arange(5), 'toeplitz'),
(ck_w3, None, 'quadratic'),
(ck_w3, np.arange(5)**2, 'toeplitz'),
(ck_w3, 4*np.arange(5)**2, 'toeplitz'),
(ck_w4, [0,0,1,1,2], 'toeplitz')]
#Note R:irr drops the missing category level 4 and uses the reduced matrix
r = np.histogramdd(anxiety[:,1:], ([1, 2, 3, 4, 6, 7], [1, 2, 3, 4, 6, 7]))
for res2, w, wt in all_cases:
msg = repr(w) + repr(wt)
res1 = cohens_kappa(r[0], weights=w, wt=wt)
assert_almost_equal(res1.kappa, res2.value, decimal=6, err_msg=msg)
assert_almost_equal(res1.z_value, res2.statistic, decimal=5, err_msg=msg)
assert_almost_equal(res1.pvalue_two_sided, res2.p_value, decimal=6, err_msg=msg)
def test_fleiss_kappa_irr():
fleiss = Holder()
#> r = kappam.fleiss(diagnoses)
#> cat_items(r, pref="fleiss.")
fleiss.method = "Fleiss' Kappa for m Raters"
fleiss.irr_name = 'Kappa'
fleiss.value = 0.4302445
fleiss.stat_name = 'z'
fleiss.statistic = 17.65183
fleiss.p_value = 0
data_ = aggregate_raters(diagnoses)[0]
res1_kappa = fleiss_kappa(data_)
assert_almost_equal(res1_kappa, fleiss.value, decimal=7)
def test_to_table():
data = diagnoses
res1 = to_table(data[:,:2]-1, 5)
res0 = np.asarray([[(data[:,:2]-1 == [i,j]).all(1).sum()
for j in range(5)]
for i in range(5)] )
assert_equal(res1[0], res0)
res2 = to_table(data[:,:2])
assert_equal(res2[0], res0)
bins = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5]
res3 = to_table(data[:,:2], bins)
assert_equal(res3[0], res0)
#more than 2 columns
res4 = to_table(data[:,:3]-1, bins=[-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
res5 = to_table(data[:,:3]-1, bins=5)
assert_equal(res4[0].sum(-1), res0)
assert_equal(res5[0].sum(-1), res0)
def test_aggregate_raters():
data = diagnoses
resf = aggregate_raters(data)
colsum = np.array([26, 26, 30, 55, 43])
assert_equal(resf[0].sum(0), colsum)
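# aggregate_raters converts the raw (n_subjects, n_raters) ratings in diagnoses into an
# (n_subjects, n_categories) count table; colsum above is the total number of ratings
# each category received across all 30 subjects (sums to 30 subjects * 6 raters = 180)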
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x'#, '--pdb-failures'
], exit=False)
| bsd-3-clause |
t0in4/django | tests/admin_checks/models.py | 281 | 1836 | """
Tests of ModelAdmin system checks logic.
"""
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Album(models.Model):
title = models.CharField(max_length=150)
@python_2_unicode_compatible
class Song(models.Model):
title = models.CharField(max_length=150)
album = models.ForeignKey(Album, models.CASCADE)
original_release = models.DateField(editable=False)
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
def readonly_method_on_model(self):
# does nothing
pass
class TwoAlbumFKAndAnE(models.Model):
album1 = models.ForeignKey(Album, models.CASCADE, related_name="album1_set")
album2 = models.ForeignKey(Album, models.CASCADE, related_name="album2_set")
e = models.CharField(max_length=1)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
subtitle = models.CharField(max_length=100)
price = models.FloatField()
authors = models.ManyToManyField(Author, through='AuthorsBooks')
class AuthorsBooks(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
book = models.ForeignKey(Book, models.CASCADE)
featured = models.BooleanField()
class State(models.Model):
name = models.CharField(max_length=15)
class City(models.Model):
state = models.ForeignKey(State, models.CASCADE)
class Influence(models.Model):
name = models.TextField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
| bsd-3-clause |
meziti/bigbullions-test | p2pool/bitcoin/worker_interface.py | 230 | 5901 | from __future__ import division
import StringIO
import json
import random
import sys
from twisted.internet import defer
import p2pool
from p2pool.bitcoin import data as bitcoin_data, getwork
from p2pool.util import expiring_dict, jsonrpc, pack, variable
class _Provider(object):
def __init__(self, parent, long_poll):
self.parent = parent
self.long_poll = long_poll
def rpc_getwork(self, request, data=None):
return self.parent._getwork(request, data, long_poll=self.long_poll)
class _GETableServer(jsonrpc.HTTPServer):
def __init__(self, provider, render_get_func):
jsonrpc.HTTPServer.__init__(self, provider)
self.render_GET = render_get_func
class WorkerBridge(object):
def __init__(self):
self.new_work_event = variable.Event()
def preprocess_request(self, request):
return request, # *args to self.compute
def get_work(self, request):
raise NotImplementedError()
class WorkerInterface(object):
def __init__(self, worker_bridge):
self.worker_bridge = worker_bridge
self.worker_views = {}
self.merkle_root_to_handler = expiring_dict.ExpiringDict(300)
def attach_to(self, res, get_handler=None):
res.putChild('', _GETableServer(_Provider(self, long_poll=False), get_handler))
def repost(request):
request.content = StringIO.StringIO(json.dumps(dict(id=0, method='getwork')))
return s.render_POST(request)
s = _GETableServer(_Provider(self, long_poll=True), repost)
res.putChild('long-polling', s)
@defer.inlineCallbacks
def _getwork(self, request, data, long_poll):
request.setHeader('X-Long-Polling', '/long-polling')
request.setHeader('X-Roll-NTime', 'expire=100')
request.setHeader('X-Is-P2Pool', 'true')
if request.getHeader('Host') is not None:
request.setHeader('X-Stratum', 'stratum+tcp://' + request.getHeader('Host'))
if data is not None:
header = getwork.decode_data(data)
if header['merkle_root'] not in self.merkle_root_to_handler:
print >>sys.stderr, '''Couldn't link returned work's merkle root with its handler. This should only happen if this process was recently restarted!'''
defer.returnValue(False)
defer.returnValue(self.merkle_root_to_handler[header['merkle_root']](header, request.getUser() if request.getUser() is not None else '', '\0'*self.worker_bridge.COINBASE_NONCE_LENGTH))
if p2pool.DEBUG:
id = random.randrange(1000, 10000)
print 'POLL %i START is_long_poll=%r user_agent=%r user=%r' % (id, long_poll, request.getHeader('User-Agent'), request.getUser())
if long_poll:
request_id = request.getClientIP(), request.getHeader('Authorization')
if self.worker_views.get(request_id, self.worker_bridge.new_work_event.times) != self.worker_bridge.new_work_event.times:
if p2pool.DEBUG:
print 'POLL %i PUSH' % (id,)
else:
if p2pool.DEBUG:
print 'POLL %i WAITING' % (id,)
yield self.worker_bridge.new_work_event.get_deferred()
self.worker_views[request_id] = self.worker_bridge.new_work_event.times
x, handler = self.worker_bridge.get_work(*self.worker_bridge.preprocess_request(request.getUser() if request.getUser() is not None else ''))
res = getwork.BlockAttempt(
version=x['version'],
previous_block=x['previous_block'],
merkle_root=bitcoin_data.check_merkle_link(bitcoin_data.hash256(x['coinb1'] + '\0'*self.worker_bridge.COINBASE_NONCE_LENGTH + x['coinb2']), x['merkle_link']),
timestamp=x['timestamp'],
bits=x['bits'],
share_target=x['share_target'],
)
assert res.merkle_root not in self.merkle_root_to_handler
self.merkle_root_to_handler[res.merkle_root] = handler
if p2pool.DEBUG:
print 'POLL %i END identifier=%i' % (id, self.worker_bridge.new_work_event.times)
extra_params = {}
if request.getHeader('User-Agent') == 'Jephis PIC Miner':
# ASICMINER BE Blades apparently have a buffer overflow bug and
# can't handle much extra in the getwork response
extra_params = {}
else:
extra_params = dict(identifier=str(self.worker_bridge.new_work_event.times), submitold=True)
defer.returnValue(res.getwork(**extra_params))
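# CachingWorkerBridge wraps another worker bridge: it caches get_work() results per
# new_work_event tick and reserves roughly half of the coinbase nonce bytes for its own
# counter, so repeated requests with the same arguments receive distinct nonce prefixes
# without hitting the inner bridge again.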
class CachingWorkerBridge(object):
def __init__(self, inner):
self._inner = inner
self.net = self._inner.net
self.COINBASE_NONCE_LENGTH = (inner.COINBASE_NONCE_LENGTH+1)//2
self.new_work_event = inner.new_work_event
self.preprocess_request = inner.preprocess_request
self._my_bits = (self._inner.COINBASE_NONCE_LENGTH - self.COINBASE_NONCE_LENGTH)*8
self._cache = {}
self._times = None
def get_work(self, *args):
if self._times != self.new_work_event.times:
self._cache = {}
self._times = self.new_work_event.times
if args not in self._cache:
x, handler = self._inner.get_work(*args)
self._cache[args] = x, handler, 0
x, handler, nonce = self._cache.pop(args)
res = (
dict(x, coinb1=x['coinb1'] + pack.IntType(self._my_bits).pack(nonce)),
lambda header, user, coinbase_nonce: handler(header, user, pack.IntType(self._my_bits).pack(nonce) + coinbase_nonce),
)
if nonce + 1 != 2**self._my_bits:
self._cache[args] = x, handler, nonce + 1
return res
| gpl-3.0 |
polynomial/nixops | nixops/resources/gce_image.py | 3 | 3281 | # -*- coding: utf-8 -*-
# Automatic provisioning of GCE Images.
import os
import libcloud.common.google
from nixops.util import attr_property
from nixops.gce_common import ResourceDefinition, ResourceState
class GCEImageDefinition(ResourceDefinition):
"""Definition of a GCE Image"""
@classmethod
def get_type(cls):
return "gce-image"
@classmethod
def get_resource_type(cls):
return "gceImages"
def __init__(self, xml):
ResourceDefinition.__init__(self, xml)
self.image_name = self.get_option_value(xml, 'name', str)
self.copy_option(xml, 'sourceUri', str)
self.copy_option(xml, 'description', str, optional = True)
def show_type(self):
return self.get_type()
class GCEImageState(ResourceState):
"""State of a GCE Image"""
image_name = attr_property("gce.name", None)
source_uri = attr_property("gce.sourceUri", None)
description = attr_property("gce.description", None)
@classmethod
def get_type(cls):
return "gce-image"
def __init__(self, depl, name, id):
ResourceState.__init__(self, depl, name, id)
def show_type(self):
return super(GCEImageState, self).show_type()
@property
def resource_id(self):
return self.image_name
nix_name = "gceImages"
@property
def full_name(self):
return "GCE image '{0}'".format(self.image_name)
def image(self):
img = self.connect().ex_get_image(self.image_name)
if img:
img.destroy = img.delete
return img
defn_properties = [ 'description', 'source_uri' ]
def create(self, defn, check, allow_reboot, allow_recreate):
self.no_property_change(defn, 'source_uri')
self.no_property_change(defn, 'description')
self.no_project_change(defn)
self.copy_credentials(defn)
self.image_name = defn.image_name
if check:
image = self.image()
if image:
if self.state == self.UP:
self.handle_changed_property('description', image.extra['description'], can_fix = False)
else:
self.warn_not_supposed_to_exist(valuable_data = True)
self.confirm_destroy(image, self.full_name)
else:
self.warn_missing_resource()
if self.state != self.UP:
self.log("creating {0}...".format(self.full_name))
try:
image = self.connect().ex_copy_image(defn.image_name, defn.source_uri,
description = defn.description)
except libcloud.common.google.ResourceExistsError:
raise Exception("tried creating an image that already exists; "
"please run 'deploy --check' to fix this")
self.state = self.UP
self.copy_properties(defn)
def destroy(self, wipe=False):
if self.state == self.UP:
image = self.image()
if image:
return self.confirm_destroy(image, self.full_name, abort = False)
else:
self.warn("tried to destroy {0} which didn't exist".format(self.full_name))
return True
| lgpl-3.0 |
sergio-incaser/odoo | openerp/service/__init__.py | 380 | 1613 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import common
import db
import model
import report
import wsgi_server
import server
#.apidoc title: RPC Services
""" Classes of this module implement the network protocols that the
OpenERP server uses to communicate with remote clients.
Some classes are mostly utilities, whose API need not be visible to
the average user/developer. Study them only if you are about to
implement an extension to the network protocols, or need to debug some
low-level behavior of the wire.
"""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MihaiMoldovanu/ansible | test/units/modules/cloud/amazon/test_kinesis_stream.py | 28 | 9780 | import pytest
import unittest
boto3 = pytest.importorskip("boto3")
botocore = pytest.importorskip("botocore")
import ansible.modules.cloud.amazon.kinesis_stream as kinesis_stream
aws_region = 'us-west-2'
class AnsibleKinesisStreamFunctions(unittest.TestCase):
def test_convert_to_lower(self):
example = {
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
converted_example = kinesis_stream.convert_to_lower(example)
keys = list(converted_example.keys())
keys.sort()
for i in range(len(keys)):
if i == 0:
self.assertEqual(keys[i], 'has_more_shards')
if i == 1:
self.assertEqual(keys[i], 'retention_period_hours')
if i == 2:
self.assertEqual(keys[i], 'stream_arn')
if i == 3:
self.assertEqual(keys[i], 'stream_name')
if i == 4:
self.assertEqual(keys[i], 'stream_status')
def test_make_tags_in_aws_format(self):
example = {
'env': 'development'
}
should_return = [
{
'Key': 'env',
'Value': 'development'
}
]
aws_tags = kinesis_stream.make_tags_in_aws_format(example)
self.assertEqual(aws_tags, should_return)
def test_make_tags_in_proper_format(self):
example = [
{
'Key': 'env',
'Value': 'development'
},
{
'Key': 'service',
'Value': 'web'
}
]
should_return = {
'env': 'development',
'service': 'web'
}
proper_tags = kinesis_stream.make_tags_in_proper_format(example)
self.assertEqual(proper_tags, should_return)
def test_recreate_tags_from_list(self):
example = [('environment', 'development'), ('service', 'web')]
should_return = [
{
'Key': 'environment',
'Value': 'development'
},
{
'Key': 'service',
'Value': 'web'
}
]
aws_tags = kinesis_stream.recreate_tags_from_list(example)
self.assertEqual(aws_tags, should_return)
def test_get_tags(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg, tags = kinesis_stream.get_tags(client, 'test', check_mode=True)
self.assertTrue(success)
should_return = [
{
'Key': 'DryRunMode',
'Value': 'true'
}
]
self.assertEqual(tags, should_return)
def test_find_stream(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg, stream = (
kinesis_stream.find_stream(client, 'test', check_mode=True)
)
should_return = {
'OpenShardsCount': 5,
'ClosedShardsCount': 0,
'ShardsCount': 5,
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
self.assertTrue(success)
self.assertEqual(stream, should_return)
def test_wait_for_status(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg, stream = (
kinesis_stream.wait_for_status(
client, 'test', 'ACTIVE', check_mode=True
)
)
should_return = {
'OpenShardsCount': 5,
'ClosedShardsCount': 0,
'ShardsCount': 5,
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
self.assertTrue(success)
self.assertEqual(stream, should_return)
def test_tags_action_create(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, err_msg = (
kinesis_stream.tags_action(
client, 'test', tags, 'create', check_mode=True
)
)
self.assertTrue(success)
def test_tags_action_delete(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, err_msg = (
kinesis_stream.tags_action(
client, 'test', tags, 'delete', check_mode=True
)
)
self.assertTrue(success)
def test_tags_action_invalid(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, err_msg = (
kinesis_stream.tags_action(
client, 'test', tags, 'append', check_mode=True
)
)
self.assertFalse(success)
def test_update_tags(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, changed, err_msg = (
kinesis_stream.update_tags(
client, 'test', tags, check_mode=True
)
)
self.assertTrue(success)
def test_stream_action_create(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.stream_action(
client, 'test', 10, 'create', check_mode=True
)
)
self.assertTrue(success)
def test_stream_action_delete(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.stream_action(
client, 'test', 10, 'delete', check_mode=True
)
)
self.assertTrue(success)
def test_stream_action_invalid(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.stream_action(
client, 'test', 10, 'append', check_mode=True
)
)
self.assertFalse(success)
def test_retention_action_increase(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.retention_action(
client, 'test', 48, 'increase', check_mode=True
)
)
self.assertTrue(success)
def test_retention_action_decrease(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.retention_action(
client, 'test', 24, 'decrease', check_mode=True
)
)
self.assertTrue(success)
def test_retention_action_invalid(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.retention_action(
client, 'test', 24, 'create', check_mode=True
)
)
self.assertFalse(success)
def test_update_shard_count(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.update_shard_count(
client, 'test', 5, check_mode=True
)
)
self.assertTrue(success)
def test_update(self):
client = boto3.client('kinesis', region_name=aws_region)
current_stream = {
'OpenShardsCount': 5,
'ClosedShardsCount': 0,
'ShardsCount': 1,
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
tags = {
'env': 'development',
'service': 'web'
}
success, changed, err_msg = (
kinesis_stream.update(
client, current_stream, 'test', number_of_shards=2, retention_period=48,
tags=tags, check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
self.assertEqual(err_msg, 'Kinesis Stream test updated successfully.')
def test_create_stream(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, changed, err_msg, results = (
kinesis_stream.create_stream(
client, 'test', number_of_shards=10, retention_period=48,
tags=tags, check_mode=True
)
)
should_return = {
'open_shards_count': 5,
'closed_shards_count': 0,
'shards_count': 5,
'has_more_shards': True,
'retention_period_hours': 24,
'stream_name': 'test',
'stream_arn': 'arn:aws:kinesis:east-side:123456789:stream/test',
'stream_status': 'ACTIVE',
'tags': tags,
}
self.assertTrue(success)
self.assertTrue(changed)
self.assertEqual(results, should_return)
self.assertEqual(err_msg, 'Kinesis Stream test updated successfully.')
| gpl-3.0 |
sciurus/python_koans | python3/runner/sensei.py | 59 | 9937 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os
import glob
from . import helper
from .mockable_test_result import MockableTestResult
from runner import path_to_enlightenment
from libs.colorama import init, Fore, Style
init() # init colorama
class Sensei(MockableTestResult):
def __init__(self, stream):
unittest.TestResult.__init__(self)
self.stream = stream
self.prevTestClassName = None
self.tests = path_to_enlightenment.koans()
self.pass_count = 0
self.lesson_pass_count = 0
self.all_lessons = None
def startTest(self, test):
MockableTestResult.startTest(self, test)
if helper.cls_name(test) != self.prevTestClassName:
self.prevTestClassName = helper.cls_name(test)
if not self.failures:
self.stream.writeln()
self.stream.writeln("{0}{1}Thinking {2}".format(
Fore.RESET, Style.NORMAL, helper.cls_name(test)))
if helper.cls_name(test) not in ['AboutAsserts', 'AboutExtraCredit']:
self.lesson_pass_count += 1
def addSuccess(self, test):
if self.passesCount():
MockableTestResult.addSuccess(self, test)
self.stream.writeln( \
" {0}{1}{2} has expanded your awareness.{3}{4}" \
.format(Fore.GREEN, Style.BRIGHT, test._testMethodName, \
Fore.RESET, Style.NORMAL))
self.pass_count += 1
def addError(self, test, err):
# Having 1 list for errors and 1 list for failures would mess with
# the error sequence
self.addFailure(test, err)
def passesCount(self):
return not (self.failures and helper.cls_name(self.failures[0][0]) != self.prevTestClassName)
def addFailure(self, test, err):
MockableTestResult.addFailure(self, test, err)
def sortFailures(self, testClassName):
table = list()
for test, err in self.failures:
if helper.cls_name(test) == testClassName:
m = re.search("(?<= line )\d+" ,err)
if m:
tup = (int(m.group(0)), test, err)
table.append(tup)
if table:
return sorted(table)
else:
return None
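# sortFailures() feeds firstFailure(): failures are keyed by the source line number
# parsed from the traceback, so the earliest failing koan in a file is reported first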
def firstFailure(self):
if not self.failures: return None
table = self.sortFailures(helper.cls_name(self.failures[0][0]))
if table:
return (table[0][1], table[0][2])
else:
return None
def learn(self):
self.errorReport()
self.stream.writeln("")
self.stream.writeln("")
self.stream.writeln(self.report_progress())
if self.failures:
self.stream.writeln(self.report_remaining())
self.stream.writeln("")
self.stream.writeln(self.say_something_zenlike())
if self.failures: sys.exit(-1)
self.stream.writeln(
"\n{0}**************************************************" \
.format(Fore.RESET))
self.stream.writeln("\n{0}That was the last one, well done!" \
.format(Fore.MAGENTA))
self.stream.writeln(
"\nIf you want more, take a look at about_extra_credit_task.py{0}{1}" \
.format(Fore.RESET, Style.NORMAL))
def errorReport(self):
problem = self.firstFailure()
if not problem: return
test, err = problem
self.stream.writeln(" {0}{1}{2} has damaged your "
"karma.".format(Fore.RED, Style.BRIGHT, test._testMethodName))
self.stream.writeln("\n{0}{1}You have not yet reached enlightenment ..." \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}".format(Fore.RED, \
Style.BRIGHT, self.scrapeAssertionError(err)))
self.stream.writeln("")
self.stream.writeln("{0}{1}Please meditate on the following code:" \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}{3}{4}".format(Fore.YELLOW, Style.BRIGHT, \
self.scrapeInterestingStackDump(err), Fore.RESET, Style.NORMAL))
def scrapeAssertionError(self, err):
if not err: return ""
error_text = ""
count = 0
for line in err.splitlines():
m = re.search("^[^^ ].*$",line)
if m and m.group(0):
count+=1
if count>1:
error_text += (" " + line.strip()).rstrip() + '\n'
return error_text.strip('\n')
def scrapeInterestingStackDump(self, err):
if not err:
return ""
lines = err.splitlines()
sep = '@@@@@SEP@@@@@'
stack_text = ""
for line in lines:
m = re.search("^ File .*$",line)
if m and m.group(0):
stack_text += '\n' + line
m = re.search("^ \w(\w)+.*$",line)
if m and m.group(0):
stack_text += sep + line
lines = stack_text.splitlines()
stack_text = ""
for line in lines:
m = re.search("^.*[/\\\\]koans[/\\\\].*$",line)
if m and m.group(0):
stack_text += line + '\n'
stack_text = stack_text.replace(sep, '\n').strip('\n')
stack_text = re.sub(r'(about_\w+.py)',
r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
stack_text = re.sub(r'(line \d+)',
r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
return stack_text
def report_progress(self):
return "You have completed {0} koans and " \
"{1} lessons.".format(
self.pass_count,
self.lesson_pass_count)
def report_remaining(self):
koans_remaining = self.total_koans() - self.pass_count
lessons_remaining = self.total_lessons() - self.lesson_pass_count
return "You are now {0} koans and {1} lessons away from " \
"reaching enlightenment.".format(
koans_remaining,
lessons_remaining)
# Hat's tip to Tim Peters for the zen statements from The 'Zen
# of Python' (http://www.python.org/dev/peps/pep-0020/)
#
# Also a hat's tip to Ara T. Howard for the zen statements from his
# metakoans Ruby Quiz (http://rubyquiz.com/quiz67.html) and
# Edgecase's later permutation in the Ruby Koans
def say_something_zenlike(self):
if self.failures:
turn = self.pass_count % 37
zenness = "";
if turn == 0:
zenness = "Beautiful is better than ugly."
elif turn == 1 or turn == 2:
zenness = "Explicit is better than implicit."
elif turn == 3 or turn == 4:
zenness = "Simple is better than complex."
elif turn == 5 or turn == 6:
zenness = "Complex is better than complicated."
elif turn == 7 or turn == 8:
zenness = "Flat is better than nested."
elif turn == 9 or turn == 10:
zenness = "Sparse is better than dense."
elif turn == 11 or turn == 12:
zenness = "Readability counts."
elif turn == 13 or turn == 14:
zenness = "Special cases aren't special enough to " \
"break the rules."
elif turn == 15 or turn == 16:
zenness = "Although practicality beats purity."
elif turn == 17 or turn == 18:
zenness = "Errors should never pass silently."
elif turn == 19 or turn == 20:
zenness = "Unless explicitly silenced."
elif turn == 21 or turn == 22:
zenness = "In the face of ambiguity, refuse the " \
"temptation to guess."
elif turn == 23 or turn == 24:
zenness = "There should be one-- and preferably only " \
"one --obvious way to do it."
elif turn == 25 or turn == 26:
zenness = "Although that way may not be obvious at " \
"first unless you're Dutch."
elif turn == 27 or turn == 28:
zenness = "Now is better than never."
elif turn == 29 or turn == 30:
zenness = "Although never is often better than right " \
"now."
elif turn == 31 or turn == 32:
zenness = "If the implementation is hard to explain, " \
"it's a bad idea."
elif turn == 33 or turn == 34:
zenness = "If the implementation is easy to explain, " \
"it may be a good idea."
else:
zenness = "Namespaces are one honking great idea -- " \
"let's do more of those!"
return "{0}{1}{2}{3}".format(Fore.CYAN, zenness, Fore.RESET, Style.NORMAL);
else:
return "{0}Nobody ever expects the Spanish Inquisition." \
.format(Fore.CYAN)
# Hopefully this will never ever happen!
return "The temple is collapsing! Run!!!"
def total_lessons(self):
all_lessons = self.filter_all_lessons()
if all_lessons:
return len(all_lessons)
else:
return 0
def total_koans(self):
return self.tests.countTestCases()
def filter_all_lessons(self):
cur_dir = os.path.split(os.path.realpath(__file__))[0]
if not self.all_lessons:
self.all_lessons = glob.glob('{0}/../koans/about*.py'.format(cur_dir))
self.all_lessons = list(filter(lambda filename:
"about_extra_credit" not in filename,
self.all_lessons))
return self.all_lessons
| mit |
tqchen/tvm | tests/python/topi/python/test_topi_group_conv2d.py | 1 | 11141 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do group convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.util import get_const_tuple
from common import Int8Fallback
import tvm.testing
_group_conv2d_nchw_implement = {
"generic": (topi.nn.group_conv2d_nchw, topi.generic.schedule_group_conv2d_nchw),
"gpu": (topi.cuda.group_conv2d_nchw, topi.cuda.schedule_group_conv2d_nchw),
}
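# _group_conv2d_nchw_implement maps a target keyword to its (compute, schedule) pair;
# check_device() below looks up the pair for the current target via tvm.topi.testing.dispatch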
def verify_group_conv2d_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
W = te.placeholder((num_filter, in_channel // groups, kernel, kernel), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
fcompute, fschedule = tvm.topi.testing.dispatch(device, _group_conv2d_nchw_implement)
C = fcompute(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)
for device in ["llvm", "cuda"]:
check_device(device)
oc_block_factor = 4
def verify_group_conv2d_NCHWc_int8(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype="int8")
W = te.placeholder((num_filter, in_channel // groups, kernel, kernel), name="W", dtype="int8")
bias = te.placeholder(
(num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype="int8"
)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_NCHWc_int8")
def get_ref_data():
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
# convert to NCHWc
_, _, out_height, out_width = c_np.shape
c_np = c_np.reshape(
(batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
).transpose(0, 1, 3, 4, 2)
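# c_np is now in packed NCHW4c layout: (batch, out_channel // 4, height, width, 4),
# matching the output layout produced by group_conv2d_NCHWc_int8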
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
if device == "cuda" and not tvm.contrib.nvcc.have_int8(ctx.compute_version):
print("Skip because int8 intrinsics are not available")
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
C = topi.cuda.group_conv2d_NCHWc_int8(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.cuda.schedule_group_conv2d_NCHWc_int8([C])
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)
for device in ["cuda"]:
check_device(device)
@tvm.testing.uses_gpu
def test_group_conv2d_nchw():
# ResNeXt-50 workload
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
# bias, relu
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
# dilation
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 2, 32)
# batch size
verify_group_conv2d_nchw(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(9, 128, 56, 128, 3, 1, 1, 1, 32)
@tvm.testing.requires_cuda
def test_group_conv2d_NCHWc_int8():
with Int8Fallback():
# ResNeXt-50 workload
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
# bias, relu
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_NCHWc_int8(
1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True
)
# dilation
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 2, 32)
# batch size
verify_group_conv2d_NCHWc_int8(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(9, 128, 56, 128, 3, 1, 1, 1, 32)
if __name__ == "__main__":
test_group_conv2d_nchw()
test_group_conv2d_NCHWc_int8()
| apache-2.0 |
landism/pants | tests/python/pants_test/binaries/test_binary_util.py | 8 | 8219 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import mock
from pants.binaries.binary_util import BinaryUtil
from pants.net.http.fetcher import Fetcher
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_open
from pants_test.base_test import BaseTest
class BinaryUtilTest(BaseTest):
"""Tests binary_util's pants_support_baseurls handling."""
class MapFetcher(object):
"""Class which pretends to be a pants.net.http.Fetcher, but is actually a dictionary."""
def __init__(self, read_map):
self._map = read_map
def download(self, url, path_or_fd=None, **kwargs):
if not url in self._map:
raise IOError("404: Virtual URL '{}' does not exist.".format(url))
if not path_or_fd:
raise AssertionError("Expected path_or_fd to be set")
path_or_fd.write(self._map[url])
return path_or_fd
def keys(self):
return self._map.keys()
def values(self):
return self._map.values()
def __getitem__(self, key):
return self._map[key] # Vanilla internal map access (without lambda shenanigans).
@classmethod
def _fake_base(cls, name):
return 'fake-url-{name}'.format(name=name)
@classmethod
def _fake_url(cls, binaries, base, binary_key):
binary_util = BinaryUtil([], 0, '/tmp')
supportdir, version, name = binaries[binary_key]
binary = binary_util._select_binary_base_path(supportdir, version, binary_key)
return '{base}/{binary}'.format(base=base, binary=binary)
def test_timeout(self):
fetcher = mock.create_autospec(Fetcher, spec_set=True)
binary_util = BinaryUtil(baseurls=['http://binaries.example.com'],
timeout_secs=42,
bootstrapdir='/tmp')
self.assertFalse(fetcher.download.called)
with binary_util._select_binary_stream('a-binary', 'a-binary/v1.2/a-binary', fetcher=fetcher):
fetcher.download.assert_called_once_with('http://binaries.example.com/a-binary/v1.2/a-binary',
listener=mock.ANY,
path_or_fd=mock.ANY,
timeout_secs=42)
def test_nobases(self):
"""Tests exception handling if build support urls are improperly specified."""
binary_util = BinaryUtil(baseurls=[], timeout_secs=30, bootstrapdir='/tmp')
with self.assertRaises(binary_util.NoBaseUrlsError):
binary_path = binary_util._select_binary_base_path(supportdir='bin/protobuf',
version='2.4.1',
name='protoc')
with binary_util._select_binary_stream(name='protoc', binary_path=binary_path):
self.fail('Expected acquisition of the stream to raise.')
def test_support_url_multi(self):
"""Tests to make sure existing base urls function as expected."""
with temporary_dir() as invalid_local_files, temporary_dir() as valid_local_files:
binary_util = BinaryUtil(
baseurls=[
'BLATANTLY INVALID URL',
'https://dl.bintray.com/pantsbuild/bin/reasonably-invalid-url',
invalid_local_files,
valid_local_files,
'https://dl.bintray.com/pantsbuild/bin/another-invalid-url',
],
timeout_secs=30,
bootstrapdir='/tmp')
binary_path = binary_util._select_binary_base_path(supportdir='bin/protobuf',
version='2.4.1',
name='protoc')
contents = b'proof'
with safe_open(os.path.join(valid_local_files, binary_path), 'wb') as fp:
fp.write(contents)
with binary_util._select_binary_stream(name='protoc', binary_path=binary_path) as stream:
self.assertEqual(contents, stream())
def test_support_url_fallback(self):
"""Tests fallback behavior with multiple support baseurls.
Mocks up some dummy baseurls and then swaps out the URL reader to make sure urls are accessed
and others are not.
"""
fake_base, fake_url = self._fake_base, self._fake_url
bases = [fake_base('apple'), fake_base('orange'), fake_base('banana')]
binary_util = BinaryUtil(bases, 30, '/tmp')
binaries = {t[2]: t for t in (('bin/protobuf', '2.4.1', 'protoc'),
('bin/ivy', '4.3.7', 'ivy'),
('bin/bash', '4.4.3', 'bash'))}
fetcher = self.MapFetcher({
fake_url(binaries, bases[0], 'protoc'): 'SEEN PROTOC',
fake_url(binaries, bases[0], 'ivy'): 'SEEN IVY',
fake_url(binaries, bases[1], 'bash'): 'SEEN BASH',
fake_url(binaries, bases[1], 'protoc'): 'UNSEEN PROTOC 1',
fake_url(binaries, bases[2], 'protoc'): 'UNSEEN PROTOC 2',
fake_url(binaries, bases[2], 'ivy'): 'UNSEEN IVY 2',
})
unseen = [item for item in fetcher.values() if item.startswith('SEEN ')]
for supportdir, version, name in binaries.values():
binary_path = binary_util._select_binary_base_path(supportdir=supportdir,
version=version,
name=name)
with binary_util._select_binary_stream(name=name,
binary_path=binary_path,
fetcher=fetcher) as stream:
result = stream()
self.assertEqual(result, 'SEEN ' + name.upper())
unseen.remove(result)
self.assertEqual(0, len(unseen)) # Make sure we've seen all the SEENs.
def test_select_binary_base_path_linux(self):
binary_util = BinaryUtil([], 0, '/tmp')
def uname_func():
return "linux", "dontcare1", "dontcare2", "dontcare3", "amd64"
self.assertEquals("supportdir/linux/x86_64/name/version",
binary_util._select_binary_base_path("supportdir", "name", "version",
uname_func=uname_func))
def test_select_binary_base_path_darwin(self):
binary_util = BinaryUtil([], 0, '/tmp')
def uname_func():
return "darwin", "dontcare1", "14.9", "dontcare2", "dontcare3",
self.assertEquals("supportdir/mac/10.10/name/version",
binary_util._select_binary_base_path("supportdir", "name", "version",
uname_func=uname_func))
def test_select_binary_base_path_missing_os(self):
binary_util = BinaryUtil([], 0, '/tmp')
def uname_func():
return "vms", "dontcare1", "999.9", "dontcare2", "VAX9"
with self.assertRaisesRegexp(BinaryUtil.MissingMachineInfo,
r'Pants has no binaries for vms'):
binary_util._select_binary_base_path("supportdir", "name", "version", uname_func=uname_func)
def test_select_binary_base_path_missing_version(self):
binary_util = BinaryUtil([], 0, '/tmp')
def uname_func():
return "darwin", "dontcare1", "999.9", "dontcare2", "x86_64"
os_id = ('darwin', '999')
with self.assertRaisesRegexp(BinaryUtil.MissingMachineInfo,
r'Update --binaries-path-by-id to find binaries for '
r'{}'.format(re.escape(repr(os_id)))):
binary_util._select_binary_base_path("supportdir", "name", "version", uname_func=uname_func)
def test_select_binary_base_path_override(self):
binary_util = BinaryUtil([], 0, '/tmp',
{('darwin', '100'): ['skynet', '42']})
def uname_func():
return "darwin", "dontcare1", "100.99", "dontcare2", "t1000"
self.assertEquals("supportdir/skynet/42/name/version",
binary_util._select_binary_base_path("supportdir", "name", "version",
uname_func=uname_func))
| apache-2.0 |
goodwinnk/intellij-community | python/helpers/py2only/docutils/languages/nl.py | 200 | 1865 | # $Id: nl.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Martijn Pieters <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Dutch-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': 'Auteur',
'authors': 'Auteurs',
'organization': 'Organisatie',
'address': 'Adres',
'contact': 'Contact',
'version': 'Versie',
'revision': 'Revisie',
'status': 'Status',
'date': 'Datum',
'copyright': 'Copyright',
'dedication': 'Toewijding',
'abstract': 'Samenvatting',
'attention': 'Attentie!',
'caution': 'Let op!',
'danger': '!GEVAAR!',
'error': 'Fout',
'hint': 'Hint',
'important': 'Belangrijk',
'note': 'Opmerking',
'tip': 'Tip',
'warning': 'Waarschuwing',
'contents': 'Inhoud'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
'auteur': 'author',
'auteurs': 'authors',
'organisatie': 'organization',
'adres': 'address',
'contact': 'contact',
'versie': 'version',
'revisie': 'revision',
'status': 'status',
'datum': 'date',
'copyright': 'copyright',
'toewijding': 'dedication',
'samenvatting': 'abstract'}
"""Dutch (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| apache-2.0 |
percipient/raven-python | raven/contrib/bottle/utils.py | 25 | 1045 | """
raven.contrib.bottle.utils
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from raven.utils.compat import _urlparse
from raven.utils.wsgi import get_headers, get_environ
logger = logging.getLogger(__name__)
def get_data_from_request(request):
urlparts = _urlparse.urlsplit(request.url)
try:
form_dict = request.forms.dict
# we only care about the most recent value for each key
formdata = dict([(k, form_dict[k][-1]) for k in form_dict])
except Exception:
formdata = {}
data = {
'request': {
'url': '%s://%s%s' % (urlparts.scheme, urlparts.netloc, urlparts.path),
'query_string': urlparts.query,
'method': request.method,
'data': formdata,
'headers': dict(get_headers(request.environ)),
'env': dict(get_environ(request.environ)),
}
}
return data
| bsd-3-clause |
sauloal/pycluster | pypy-1.9_64/lib-python/2.7/plat-mac/lib-scriptpackages/Explorer/Microsoft_Internet_Explorer.py | 82 | 3140 | """Suite Microsoft Internet Explorer Suite: Events defined by Internet Explorer
Level 1, version 1
Generated from /Applications/Internet Explorer.app
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'MSIE'
class Microsoft_Internet_Explorer_Events:
def GetSource(self, _object=None, _attributes={}, **_arguments):
"""GetSource: Get the HTML source of a browser window
Required argument: Window Identifier of window from which to get the source. No value means get the source from the frontmost window.
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: undocumented, typecode 'TEXT'
"""
_code = 'MSIE'
_subcode = 'SORC'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def PrintBrowserWindow(self, _object=None, _attributes={}, **_arguments):
"""PrintBrowserWindow: Print contents of browser window (HTML)
Required argument: Window Identifier of the window to print. No value means print the frontmost browser window.
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'misc'
_subcode = 'pWND'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_do_script = {
'window' : 'WIND',
}
def do_script(self, _object, _attributes={}, **_arguments):
"""do script: Execute script commands
Required argument: JavaScript text to execute
Keyword argument window: optional Window Identifier (as supplied by the ListWindows event) specifying context in which to execute the script
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: Return value
"""
_code = 'misc'
_subcode = 'dosc'
aetools.keysubst(_arguments, self._argmap_do_script)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
#
# Indices of types declared in this module
#
_classdeclarations = {
}
_propdeclarations = {
}
_compdeclarations = {
}
_enumdeclarations = {
}
| mit |
ujenmr/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_certificate_facts.py | 29 | 3228 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: digital_ocean_certificate_facts
short_description: Gather facts about DigitalOcean certificates
description:
- This module can be used to gather facts about DigitalOcean provided certificates.
author: "Abhijeet Kasurde (@Akasurde)"
version_added: "2.6"
options:
certificate_id:
description:
- Certificate ID that can be used to identify and reference a certificate.
required: false
requirements:
- "python >= 2.6"
extends_documentation_fragment: digital_ocean.documentation
'''
EXAMPLES = '''
- name: Gather facts about all certificates
digital_ocean_certificate_facts:
oauth_token: "{{ oauth_token }}"
- name: Gather facts about certificate with given id
digital_ocean_certificate_facts:
oauth_token: "{{ oauth_token }}"
certificate_id: "892071a0-bb95-49bc-8021-3afd67a210bf"
- name: Get not after facts about certificate
digital_ocean_certificate_facts:
register: resp_out
- set_fact:
not_after_date: "{{ item.not_after }}"
loop: "{{ resp_out.data|json_query(name) }}"
vars:
name: "[?name=='web-cert-01']"
- debug: var=not_after_date
'''
RETURN = '''
data:
description: DigitalOcean certificate facts
returned: success
type: list
sample: [
{
"id": "892071a0-bb95-49bc-8021-3afd67a210bf",
"name": "web-cert-01",
"not_after": "2017-02-22T00:23:00Z",
"sha1_fingerprint": "dfcc9f57d86bf58e321c2c6c31c7a971be244ac7",
"created_at": "2017-02-08T16:02:37Z"
},
]
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
certificate_id = module.params.get('certificate_id', None)
rest = DigitalOceanHelper(module)
base_url = 'certificates?'
if certificate_id is not None:
response = rest.get("%s/%s" % (base_url, certificate_id))
status_code = response.status_code
if status_code != 200:
module.fail_json(msg="Failed to retrieve certificates for DigitalOcean")
resp_json = response.json
certificate = resp_json['certificate']
else:
certificate = rest.get_paginated_data(base_url=base_url, data_key_name='certificates')
module.exit_json(changed=False, data=certificate)
def main():
argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
argument_spec.update(
certificate_id=dict(type='str', required=False),
)
module = AnsibleModule(argument_spec=argument_spec)
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 |
kajgan/e2 | lib/python/Plugins/SystemPlugins/VideoEnhancement/VideoEnhancement.py | 44 | 11758 | from os import path as os_path
from Components.config import config, ConfigSubsection, ConfigSlider, ConfigSelection, ConfigBoolean, ConfigNothing, NoSave
# The "VideoEnhancement" is the interface to /proc/stb/vmpeg/0.
class VideoEnhancement:
firstRun = True
def __init__(self):
self.last_modes_preferred = [ ]
self.createConfig()
def createConfig(self, *args):
config.pep = ConfigSubsection()
config.pep.configsteps = NoSave(ConfigSelection(choices=[1, 5, 10, 25], default = 1))
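# Each block below probes one /proc/stb/vmpeg/0 node: if it exists, a config element is
# created whose notifier writes the new value back (the sliders are written as 8-digit
# hex, some scaled by 256); if the node is missing on this hardware the entry falls back
# to a dummy ConfigNothing.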
if os_path.exists("/proc/stb/vmpeg/0/pep_contrast"):
def setContrast(config):
myval = int(config.value * 256)
try:
print "--> setting contrast to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_contrast", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_contrast."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.contrast = ConfigSlider(default=128, limits=(0,256))
config.pep.contrast.addNotifier(setContrast)
else:
config.pep.contrast = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_saturation"):
def setSaturation(config):
myval = int(config.value * 256)
try:
print "--> setting saturation to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_saturation", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_saturaion."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.saturation = ConfigSlider(default=128, limits=(0,256))
config.pep.saturation.addNotifier(setSaturation)
else:
config.pep.saturation = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_hue"):
def setHue(config):
myval = int(config.value * 256)
try:
print "--> setting hue to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_hue", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_hue."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.hue = ConfigSlider(default=128, limits=(0,256))
config.pep.hue.addNotifier(setHue)
else:
config.pep.hue = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_brightness"):
def setBrightness(config):
myval = int(config.value * 256)
try:
print "--> setting brightness to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_brightness", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_brightness."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.brightness = ConfigSlider(default=128, limits=(0,256))
config.pep.brightness.addNotifier(setBrightness)
else:
config.pep.brightness = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_block_noise_reduction"):
def setBlock_noise_reduction(config):
myval = int(config.value)
try:
print "--> setting block_noise_reduction to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_block_noise_reduction", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_block_noise_reduction."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.block_noise_reduction = ConfigSlider(default=0, limits=(0,5))
config.pep.block_noise_reduction.addNotifier(setBlock_noise_reduction)
else:
config.pep.block_noise_reduction = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_mosquito_noise_reduction"):
def setMosquito_noise_reduction(config):
myval = int(config.value)
try:
print "--> setting mosquito_noise_reduction to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_mosquito_noise_reduction", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_mosquito_noise_reduction."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.mosquito_noise_reduction = ConfigSlider(default=0, limits=(0,5))
config.pep.mosquito_noise_reduction.addNotifier(setMosquito_noise_reduction)
else:
config.pep.mosquito_noise_reduction = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_digital_contour_removal"):
def setDigital_contour_removal(config):
myval = int(config.value)
try:
print "--> setting digital_contour_removal to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_digital_contour_removal", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_digital_contour_removal."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.digital_contour_removal = ConfigSlider(default=0, limits=(0,5))
config.pep.digital_contour_removal.addNotifier(setDigital_contour_removal)
else:
config.pep.digital_contour_removal = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_split"):
def setSplitMode(config):
try:
print "--> setting splitmode to:",str(config.value)
f = open("/proc/stb/vmpeg/0/pep_split", "w")
f.write(str(config.value))
f.close()
except IOError:
print "couldn't write pep_split."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.split = ConfigSelection(choices={
"off": _("Off"),
"left": _("Left"),
"right": _("Right")},
default = "off")
config.pep.split.addNotifier(setSplitMode)
else:
config.pep.split = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_sharpness"):
def setSharpness(config):
myval = int(config.value * 256)
try:
print "--> setting sharpness to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_sharpness", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_sharpness."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.sharpness = ConfigSlider(default=0, limits=(0,256))
config.pep.sharpness.addNotifier(setSharpness)
else:
config.pep.sharpness = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_auto_flesh"):
def setAutoflesh(config):
myval = int(config.value)
try:
print "--> setting auto_flesh to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_auto_flesh", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_auto_flesh."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.auto_flesh = ConfigSlider(default=0, limits=(0,4))
config.pep.auto_flesh.addNotifier(setAutoflesh)
else:
config.pep.auto_flesh = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_green_boost"):
def setGreenboost(config):
myval = int(config.value)
try:
print "--> setting green_boost to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_green_boost", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_green_boost."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.green_boost = ConfigSlider(default=0, limits=(0,4))
config.pep.green_boost.addNotifier(setGreenboost)
else:
config.pep.green_boost = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_blue_boost"):
def setBlueboost(config):
myval = int(config.value)
try:
print "--> setting blue_boost to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_blue_boost", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_blue_boost."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.blue_boost = ConfigSlider(default=0, limits=(0,4))
config.pep.blue_boost.addNotifier(setBlueboost)
else:
config.pep.blue_boost = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_dynamic_contrast"):
def setDynamic_contrast(config):
myval = int(config.value)
try:
print "--> setting dynamic_contrast to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_dynamic_contrast", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_dynamic_contrast."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.dynamic_contrast = ConfigSlider(default=0, limits=(0,256))
config.pep.dynamic_contrast.addNotifier(setDynamic_contrast)
else:
config.pep.dynamic_contrast = NoSave(ConfigNothing())
try:
x = config.av.scaler_sharpness.value
except KeyError:
if os_path.exists("/proc/stb/vmpeg/0/pep_scaler_sharpness"):
def setScaler_sharpness(config):
myval = int(config.value)
try:
print "--> setting scaler_sharpness to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_scaler_sharpness", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_scaler_sharpness."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.av.scaler_sharpness = ConfigSlider(default=13, limits=(0,26))
config.av.scaler_sharpness.addNotifier(setScaler_sharpness)
else:
config.av.scaler_sharpness = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/video/hdmi_colorspace") and os_path.exists("/proc/stb/video/hdmi_colorspace_choices"):
def setColour_space(config):
myval = config.value
try:
print "--> setting color_soace to:", myval
f = open("/proc/stb/video/hdmi_colorspace", "w")
f.write(myval)
f.close()
except IOError:
print "couldn't write color_soace."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
file = open("/proc/stb/video/hdmi_colorspace_choices", "r")
modes = file.readline().split()
file.close()
config.pep.color_space = ConfigSelection(modes, modes[0])
config.pep.color_space.addNotifier(setColour_space)
else:
config.pep.color_space = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_scaler_vertical_dejagging"):
def setScaler_vertical_dejagging(configElement):
myval = configElement.value and "enable" or "disable"
try:
print "--> setting scaler_vertical_dejagging to: %s" % myval
open("/proc/stb/vmpeg/0/pep_scaler_vertical_dejagging", "w").write(myval)
except IOError:
print "couldn't write pep_scaler_vertical_dejagging."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.scaler_vertical_dejagging = ConfigBoolean(default=False, descriptions = {False: _("Disabled"), True: _("Enabled")} )
config.pep.scaler_vertical_dejagging.addNotifier(setScaler_vertical_dejagging)
else:
config.pep.scaler_vertical_dejagging = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/smooth"):
def setSmooth(configElement):
myval = configElement.value and "enable" or "disable"
try:
print "--> setting smooth to: %s" % myval
open("/proc/stb/vmpeg/0/smooth", "w").write(myval)
except IOError:
print "couldn't write smooth."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.smooth = ConfigBoolean(default=False, descriptions = {False: _("Disabled"), True: _("Enabled")} )
config.pep.smooth.addNotifier(setSmooth)
else:
config.pep.smooth = NoSave(ConfigNothing())
if VideoEnhancement.firstRun:
self.setConfiguredValues()
VideoEnhancement.firstRun = False
def setConfiguredValues(self):
try:
print "--> applying pep values"
f = open("/proc/stb/vmpeg/0/pep_apply", "w")
f.write("1")
f.close()
except IOError:
print "couldn't apply pep values."
VideoEnhancement()
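# --- Illustrative sketch (editor's addition, not part of the original plugin) ---
# Every setter above follows the same pattern: scale the slider value, format it
# as an 8-digit hex string, write it to one node under /proc/stb/vmpeg/0 and, on
# later runs, write "1" to pep_apply so the driver picks the new values up. The
# helper below is a hypothetical consolidation of that pattern; the node names
# come from the code above, but this function does not exist in the original.
def _write_pep_node(node, value, apply_now=False):
    try:
        f = open("/proc/stb/vmpeg/0/" + node, "w")
        f.write("%0.8X" % value)
        f.close()
        if apply_now:
            f = open("/proc/stb/vmpeg/0/pep_apply", "w")
            f.write("1")
            f.close()
    except IOError:
        pass  # the node may not exist on this hardware
# Example (hypothetical): _write_pep_node("pep_contrast", 128 * 256, apply_now=True)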
| gpl-2.0 |
oudalab/phyllo | phyllo/extractors/anselmDB.py | 1 | 5827 | import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
import nltk
from itertools import cycle
nltk.download('punkt')
from nltk import sent_tokenize
anselmSOUP=""
idx = -1
cha_array=[]
suburl = []
verse = []
def parseRes2(soup, title, url, c, author, date, collectiontitle):
chapter = '-'
if url=="http://www.thelatinlibrary.com/anselmepistula.html":
getp = soup.find_all('p')[:-1]
i=len(getp)
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
'internal_navigation']: # these are not part of the main text
continue
except:
pass
num = len(getp) - (i - 1)
if p.findAll('br'):
sentn=p.get_text()
num=1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, collectiontitle, title, 'Latin', author, date, chapter,
num, sentn.strip(), url, 'prose'))
i=0
else:
i=i+1
ptext = p.string
chapter = str(i) # App. not associated with any chapter
# the first element is an empty string.
ptext = ptext[3:]
num=0
for sentn in sent_tokenize(ptext):
num=num+1
if sentn.strip() == 'men.': # textual fix
sentn = "Amen."
chapter = '-'
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, collectiontitle, title, 'Latin', author, date, chapter,
num, sentn.strip(), url, 'prose'))
else:
getp = soup.find_all('p')[:-1]
geturl=soup.find_all('a', href=True)
global idx
j = 0
#print(getp)
for u in geturl:
if u.get('href') != 'index.html' and u.get('href') != 'classics.html' and u.get('href') != 'christian.html':
suburl.append('http://www.thelatinlibrary.com/anselmproslogion.html'+u.get('href'))
suburl[13]='http://www.thelatinlibrary.com/anselmproslogion.html#capxiii'
suburl[23]='http://www.thelatinlibrary.com/anselmproslogion.html#capxxiii'
suburl.insert(14, 'http://www.thelatinlibrary.com/anselmproslogion.html#capxiv')
suburl.insert(24, 'http://www.thelatinlibrary.com/anselmproslogion.html#capxxiii')
i = len(getp)
for ch in soup.findAll('b'):
chap = ch.string
cha_array.append(''.join([i for i in chap if not i.isdigit()]))
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
'internal_navigation']: # these are not part of the main text
continue
except:
pass
if p.string == None:
idx = (idx + 1) % len(suburl)
chapter = cha_array[idx]
nurl = suburl[idx]
if p.string:
j=j+1
num=j
sentn = str(p.string)
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, collectiontitle, title, 'Latin', author, date, chapter,
num, sentn, nurl, 'prose'))
def main():
# get proper URLs
siteURL = 'http://www.thelatinlibrary.com'
anselmURL = 'http://www.thelatinlibrary.com/anselm.html'
anselmOPEN = urllib.request.urlopen(anselmURL)
anselmSOUP = BeautifulSoup(anselmOPEN, 'html5lib')
textsURL = []
for a in anselmSOUP.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, link))
# remove some unnecessary urls
while ("http://www.thelatinlibrary.com/index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com/index.html")
textsURL.remove("http://www.thelatinlibrary.com/classics.html")
textsURL.remove("http://www.thelatinlibrary.com/christian.html")
logger.info("\n".join(textsURL))
author = anselmSOUP.title.string
author = author.strip()
collectiontitle = anselmSOUP.span.contents[0].strip()
date = anselmSOUP.span.contents[0].strip().replace('(', '').replace(')', '').replace(u"\u2013", '-')
title = []
for link in anselmSOUP.findAll('a'):
if (link.get('href') and link.get('href') != 'index.html' and link.get('href') != 'classics.html' and link.get('href') != 'christian.html'):
title.append(link.string)
i=0
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Anselm'")
for u in textsURL:
uOpen = urllib.request.urlopen(u)
gestSoup = BeautifulSoup(uOpen, 'html5lib')
parseRes2(gestSoup, title[i], u, c, author, date, collectiontitle)
i=i+1
if __name__ == '__main__':
main()
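# --- Illustrative sketch (editor's addition, not part of the original extractor) ---
# main() above fills the `texts` table whose schema is created just before the
# scraping loop. A quick way to sanity-check a finished run is a plain query
# against that same table; the database name and the author filter 'Anselm' are
# taken from the code above, everything else here is illustrative.
def preview_rows(limit=5):
    with sqlite3.connect('texts.db') as db:
        cur = db.cursor()
        cur.execute("SELECT book, chapter, verse, passage FROM texts "
                    "WHERE author = 'Anselm' ORDER BY id LIMIT ?", (limit,))
        for book, chapter, verse, passage in cur.fetchall():
            print(book, chapter, verse, passage[:60])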
| apache-2.0 |
binghongcha08/pyQMD | GWP/2D/1.0.1/comp.py | 29 | 1292 | ##!/usr/bin/python
import numpy as np
import pylab as plt
data = np.genfromtxt(fname='t100/wf.dat')
data1 = np.genfromtxt(fname='t300/wf.dat')
data2 = np.genfromtxt(fname='t500/wf.dat')
data3 = np.genfromtxt(fname='t600/wf.dat')
data00 = np.genfromtxt('../spo_1d/t100')
data01 = np.genfromtxt('../spo_1d/t300')
data02 = np.genfromtxt('../spo_1d/t500')
data03 = np.genfromtxt('../spo_1d/t600')
plt.subplot(2,2,1)
plt.xlim(0.5,2.5)
plt.title('t = 100 a.u.')
plt.plot(data[:,0],data[:,1],'r--',linewidth=2,label='LQF')
plt.plot(data00[:,0],data00[:,1],'k-',linewidth=2, label='Exact')
plt.xlabel('x')
plt.ylabel('$\psi^*\psi$')
plt.subplot(2,2,2)
plt.title('t = 300 a.u.')
plt.xlim(0.5,2.5)
plt.plot(data1[:,0],data1[:,1],'r--',linewidth=2)
plt.plot(data01[:,0],data01[:,1],'k-',linewidth=2)
plt.xlabel('x')
plt.ylabel('$\psi^*\psi$')
plt.subplot(2,2,3)
plt.title('t = 500 a.u.')
plt.xlim(0.5,2.5)
plt.plot(data2[:,0],data2[:,1],'r--',linewidth=2)
plt.plot(data02[:,0],data02[:,1],'k-',linewidth=2)
plt.xlabel('x')
plt.ylabel('$\psi^*\psi$')
plt.subplot(2,2,4)
plt.title('t = 600 a.u.')
plt.xlim(0.5,2.5)
plt.plot(data3[:,0],data3[:,1],'r--',linewidth=2)
plt.plot(data03[:,0],data03[:,1],'k-',linewidth=2)
plt.xlabel('x')
plt.ylabel('$\psi^*\psi$')
plt.savefig('wft.pdf')
plt.show()
| gpl-3.0 |
SOKP/external_chromium_org | content/test/gpu/gpu_tests/webgl_conformance.py | 26 | 5126 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import optparse
import os
import sys
import webgl_conformance_expectations
from telemetry import benchmark as benchmark_module
from telemetry.core import util
from telemetry.page import page_set
from telemetry.page import page as page_module
from telemetry.page import page_test
conformance_path = os.path.join(
util.GetChromiumSrcDir(),
'third_party', 'webgl', 'src', 'sdk', 'tests')
conformance_harness_script = r"""
var testHarness = {};
testHarness._allTestSucceeded = true;
testHarness._messages = '';
testHarness._failures = 0;
testHarness._finished = false;
testHarness._originalLog = window.console.log;
testHarness.log = function(msg) {
testHarness._messages += msg + "\n";
testHarness._originalLog.apply(window.console, [msg]);
}
testHarness.reportResults = function(url, success, msg) {
testHarness._allTestSucceeded = testHarness._allTestSucceeded && !!success;
if(!success) {
testHarness._failures++;
if(msg) {
testHarness.log(msg);
}
}
};
testHarness.notifyFinished = function(url) {
testHarness._finished = true;
};
testHarness.navigateToPage = function(src) {
var testFrame = document.getElementById("test-frame");
testFrame.src = src;
};
window.webglTestHarness = testHarness;
window.parent.webglTestHarness = testHarness;
window.console.log = testHarness.log;
window.onerror = function(message, url, line) {
testHarness.reportResults(null, false, message);
testHarness.notifyFinished(null);
};
"""
def _DidWebGLTestSucceed(tab):
return tab.EvaluateJavaScript('webglTestHarness._allTestSucceeded')
def _WebGLTestMessages(tab):
return tab.EvaluateJavaScript('webglTestHarness._messages')
class WebglConformanceValidator(page_test.PageTest):
def __init__(self):
super(WebglConformanceValidator, self).__init__(attempts=1, max_failures=10)
def ValidateAndMeasurePage(self, page, tab, results):
if not _DidWebGLTestSucceed(tab):
raise page_test.Failure(_WebGLTestMessages(tab))
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--disable-gesture-requirement-for-media-playback',
'--disable-domain-blocking-for-3d-apis',
'--disable-gpu-process-crash-limit'
])
class WebglConformancePage(page_module.Page):
def __init__(self, page_set, test):
super(WebglConformancePage, self).__init__(
url='file://' + test, page_set=page_set, base_dir=page_set.base_dir,
name=('WebglConformance.%s' %
test.replace('/', '_').replace('-', '_').
replace('\\', '_').rpartition('.')[0].replace('.', '_')))
self.script_to_evaluate_on_commit = conformance_harness_script
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'webglTestHarness._finished', timeout_in_seconds=180)
class WebglConformance(benchmark_module.Benchmark):
"""Conformance with Khronos WebGL Conformance Tests"""
test = WebglConformanceValidator
@classmethod
def AddTestCommandLineArgs(cls, group):
group.add_option('--webgl-conformance-version',
help='Version of the WebGL conformance tests to run.',
default='1.0.3')
def CreatePageSet(self, options):
tests = self._ParseTests('00_test_list.txt',
options.webgl_conformance_version)
ps = page_set.PageSet(
user_agent_type='desktop',
serving_dirs=[''],
file_path=conformance_path)
for test in tests:
ps.AddPage(WebglConformancePage(ps, test))
return ps
def CreateExpectations(self, page_set):
return webgl_conformance_expectations.WebGLConformanceExpectations()
@staticmethod
def _ParseTests(path, version=None):
test_paths = []
current_dir = os.path.dirname(path)
full_path = os.path.normpath(os.path.join(conformance_path, path))
if not os.path.exists(full_path):
raise Exception('The WebGL conformance test path specified ' +
'does not exist: ' + full_path)
with open(full_path, 'r') as f:
for line in f:
line = line.strip()
if not line:
continue
if line.startswith('//') or line.startswith('#'):
continue
line_tokens = line.split(' ')
i = 0
min_version = None
while i < len(line_tokens):
token = line_tokens[i]
if token == '--min-version':
i += 1
min_version = line_tokens[i]
i += 1
if version and min_version and version < min_version:
continue
test_name = line_tokens[-1]
if '.txt' in test_name:
include_path = os.path.join(current_dir, test_name)
test_paths += WebglConformance._ParseTests(
include_path, version)
else:
test = os.path.join(current_dir, test_name)
test_paths.append(test)
return test_paths
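# --- Illustrative note (editor's addition, not part of the Chromium harness) ---
# _ParseTests() walks the suite's 00_test_list.txt files: blank lines and lines
# starting with '//' or '#' are skipped, a '--min-version X' token hides a test
# from older conformance versions, a '.txt' entry is parsed recursively, and
# anything else is collected as a test page path. With hypothetical input lines
#
#   // top-level list
#   --min-version 1.0.4 conformance2/new-feature.html
#   conformance/attribs/00_test_list.txt
#   conformance/context/context-lost.html
#
# a call such as WebglConformance._ParseTests('00_test_list.txt', '1.0.3') would
# drop the 1.0.4-gated entry, recurse into the included list, and keep the rest.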
| bsd-3-clause |
TinyOS-Camp/DDEA-DEV | Archive/[14_10_03] Data_Collection_Sample/DB access sample code/vtt/sampling_density_VTT.py | 1 | 6262 | import os
import sys
import json
from datetime import datetime
import time
import math
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pylab as pl
import pickle
######
### Configurations
######
UUID_FILE = 'finland_ids.csv'
#DATA_FOLDER = 'VTT_week/'
DATA_FOLDER = 'data_year/'
DATA_EXT = '.csv'
SCRIPT_DIR = os.path.dirname(__file__)
def saveObjectBinary(obj, filename):
with open(filename, "wb") as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def loadObjectBinary(filename):
with open(filename, "rb") as input:
obj = pickle.load(input)
return obj
def group_uuids(uuid_list):
sensors_metadata = []
for uuid in uuid_list:
metadata_filepath = os.path.join(SCRIPT_DIR, 'metadata/meta_' + uuid + '.dat')
### open metadata file ###
with open(str(metadata_filepath)) as f:
#metadata = f.read().strip()
#sensors_metadata.append(metadata)
sensor_metadata = json.load(f)
sensors_metadata.append((uuid, sensor_metadata[0]['Path']))
sensors_metadata.sort(key=lambda tup: tup[1])
#print sensors_metadata
return sensors_metadata
### delta_t in ms ; max_sr in ms ###
### start_time = "2013/11/01-00:00:00"
### end_time = "2013/11/07-23:59:59"
def load_uuid_list():
uuid_list = []
uuid_filepath = os.path.join(SCRIPT_DIR, UUID_FILE)
temp_uuid_list = open(uuid_filepath).readlines()
for line in temp_uuid_list:
tokens = line.strip().split(',')
if len(tokens) == 0:
continue
uuid_list.append(tokens[0].strip())
return uuid_list
def print_readings(uuid):
sensor_filepath = os.path.join(SCRIPT_DIR, 'readings/' + uuid + '.dat')
sensors_readings = []
with open(str(sensor_filepath)) as f:
# sensors_metadata.append(f.read())
json_readings = json.load(f)
sensors_readings = json_readings[0]['Readings']
if len(sensors_readings) == 0:
return
for pair in sensors_readings:
if pair[1] is None:
continue
ts = pair[0]
readable_ts = datetime.fromtimestamp(int(ts) / 1000).strftime('%Y-%m-%d %H:%M:%S')
reading = pair[1]
print str(ts), str(readable_ts), reading
def compute_sampling_density(uuid, start_time, end_time, delta_t, max_sr):
### for testing ###
#start_time = "2013/11/01-00:00:00"
#end_time = "2013/11/07-23:59:59"
start_ts = int(time.mktime(datetime.strptime(start_time, "%Y/%m/%d-%H:%M:%S").timetuple()) * 1000)
end_ts = int(time.mktime(datetime.strptime(end_time, "%Y/%m/%d-%H:%M:%S").timetuple()) * 1000)
if (end_ts - start_ts) * 1.0 / delta_t == int ( math.floor((end_ts - start_ts) / delta_t)):
num_intervals = int ( (end_ts - start_ts) / delta_t) + 1
else:
num_intervals = int(math.ceil((end_ts - start_ts) * 1.0 / delta_t))
sampling_density = [0] * num_intervals
###### open reading of uuid - BERKELEY SDH BUILDING ######
# sensor_filepath = os.path.join(SCRIPT_DIR, 'readings/' + uuid + '.dat')
# with open(str(sensor_filepath)) as f:
# # sensors_metadata.append(f.read())
# json_readings = json.load(f)
# sensors_readings = json_readings[0]['Readings']
# if len(sensors_readings) == 0:
# return sampling_density
###### open reading of uuid - VTT FINLAND ######
sensor_filepath = os.path.join(SCRIPT_DIR, DATA_FOLDER + uuid + DATA_EXT)
lines = open(str(sensor_filepath)).readlines()
sensors_readings = []
for line in lines:
pair = []
if line == "":
continue
tokens = line.strip().split(',')
if len(tokens) < 2:
continue
#[curr_date, curr_time] = tokens[0].split(' ')
#print curr_date.strip() + '-' + curr_time.strip()
ts = int(time.mktime(datetime.strptime(tokens[0].strip(), "%Y-%m-%d %H:%M:%S").timetuple()) * 1000)
reading = float(tokens[1].strip())
pair.append(ts)
pair.append(reading)
#print tokens[0].strip(), str(ts), str(reading)
# sensors_metadata.append(f.read())
###for pair in sensors_readings:
curr_ts = int(pair[0])
#reading = float(pair[1])
if curr_ts < start_ts:
continue
if curr_ts > end_ts:
break
if pair[1] is None:
continue
curr_reading_index = int( (curr_ts - start_ts) / delta_t)
sampling_density[curr_reading_index] = sampling_density[curr_reading_index] + 1
### compute density
max_num_samples = delta_t / max_sr
for i in range(0, num_intervals):
sampling_density[i] = sampling_density[i] * 1.0 / max_num_samples
return sampling_density
def compute_sampling_density_matrix(start_time, end_time, delta_t, max_sr):
uuid_list = load_uuid_list()
uuid_list = uuid_list[0:1000]
sampling_density_matrix = []
for uuid in uuid_list:
sampling_density = compute_sampling_density(uuid, start_time, end_time, delta_t, max_sr)
if len(sampling_density) == 0:
continue
sampling_density_matrix.append(sampling_density)
return sampling_density_matrix
def visualize_density_matrix(sampling_density_matrix):
plt.imshow(sampling_density_matrix, interpolation="nearest", cmap=pl.cm.spectral)
pl.savefig('density.png', bbox_inches='tight')
######
### Example
######
#uuid = "GW1.HA1_AS_TE_AH_FM"
start_time = "2013/11/01-00:00:00"
end_time = "2013/11/07-23:59:59"
max_sr = 300000 ### 1000 ms = 1s, 5mins
delta_t = 1200000 ### ms ; 20 mins
sys_argv = sys.argv
if len(sys_argv) == 5:
start_time = sys_argv[1]
end_time = sys_argv[2]
delta_t = int(sys_argv[3])
max_sr = int(sys_argv[4])
### compute sampling density matrix and visualize
sampling_density_matrix = np.asarray(compute_sampling_density_matrix(start_time, end_time, delta_t, max_sr))
visualize_density_matrix(sampling_density_matrix)
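# --- Illustrative sketch (editor's addition) ---
# Worked example of the density formula used above: with the defaults
# delta_t = 1200000 ms (20 min) and max_sr = 300000 ms (5 min), each interval
# can hold at most 1200000 / 300000 = 4 samples, so an interval that actually
# contains 3 readings gets a density of 3 / 4 = 0.75.
def example_density(readings_in_interval, delta_t_ms=1200000, max_sr_ms=300000):
    max_num_samples = delta_t_ms / max_sr_ms
    return readings_in_interval * 1.0 / max_num_samples
# example_density(3) == 0.75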
| gpl-2.0 |
jobiols/odoomrp-wip | mrp_byproduct_operations/__openerp__.py | 27 | 1453 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos ([email protected]) Date: 29/09/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "MRP byproduct Operations",
"version": "1.0",
"description": """
This module allows adding, on the BoM, the operation where the secondary
products will be produced.
""",
"author": "OdooMRP team,"
"AvanzOSC,"
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
'website': "http://www.odoomrp.com",
"depends": ['mrp_byproduct', 'mrp_operations_extension'],
"category": "Manufacturing",
"data": ['views/mrp_bom_view.xml',
],
"installable": True
}
| agpl-3.0 |
echristophe/lic | src/RectanglePacker.py | 5 | 11617 | """This library is free software; you can redistribute it and/or
modify it under the terms of the IBM Common Public License as
published by the IBM Corporation; either version 1.0 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
IBM Common Public License for more details.
You should have received a copy of the IBM Common Public
License along with this library
"""
from bisect import bisect_left
class OutOfSpaceError(Exception): pass
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __cmp__(self, other):
"""Compares the starting position of height slices"""
return self.x - other.x
class RectanglePacker(object):
"""Base class for rectangle packing algorithms
By uniting all rectangle packers under this common base class, you can
easily switch between different algorithms to find the most efficient or
performant one for a given job.
An almost exhaustive list of packing algorithms can be found here:
http://www.csc.liv.ac.uk/~epa/surveyhtml.html"""
def __init__(self, packingAreaWidth, packingAreaHeight):
"""Initializes a new rectangle packer
packingAreaWidth: Maximum width of the packing area
packingAreaHeight: Maximum height of the packing area"""
self.packingAreaWidth = packingAreaWidth
self.packingAreaHeight = packingAreaHeight
def Pack(self, rectangleWidth, rectangleHeight):
"""Allocates space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate
Returns the location at which the rectangle has been placed"""
return self.TryPack(rectangleWidth, rectangleHeight)
def TryPack(self, rectangleWidth, rectangleHeight):
"""Tries to allocate space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate
Returns a Point instance if space for the rectangle could be allocated
be found, otherwise returns None"""
raise NotImplementedError
class CygonRectanglePacker(RectanglePacker):
"""
Packer using a custom algorithm by Markus 'Cygon' Ewald
Algorithm conceived by Markus Ewald (cygon at nuclex dot org), though
I'm quite sure I'm not the first one to come up with it :)
The algorithm always places rectangles as low as possible in the packing
area. So, for any new rectangle that is to be added, the packer has to
determine the X coordinate at which the rectangle can have the lowest
overall height without intersecting any other rectangles.
To quickly discover these locations, the packer uses a sophisticated
data structure that stores the upper silhouette of the packing area. When
a new rectangle needs to be added, only the silhouette edges need to be
analyzed to find the position where the rectangle would achieve the lowest placement."""
def __init__(self, packingAreaWidth, packingAreaHeight):
"""Initializes a new rectangle packer
packingAreaWidth: Maximum width of the packing area
packingAreaHeight: Maximum height of the packing area"""
RectanglePacker.__init__(self, packingAreaWidth, packingAreaHeight)
# Stores the height silhouette of the rectangles
self.heightSlices = []
# At the beginning, the packing area is a single slice of height 0
self.heightSlices.append(Point(0,0))
def TryPack(self, rectangleWidth, rectangleHeight):
"""Tries to allocate space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate
Returns a Point instance if space for the rectangle could be allocated
be found, otherwise returns None"""
# If the rectangle is larger than the packing area in any dimension,
# it will never fit!
if rectangleWidth > self.packingAreaWidth or rectangleHeight > self.packingAreaHeight:
return None
# Determine the placement for the new rectangle
placement = self.tryFindBestPlacement(rectangleWidth, rectangleHeight)
# If a place for the rectangle could be found, update the height slice
# table to mark the region of the rectangle as being taken.
if placement:
self.integrateRectangle(placement.x, rectangleWidth, placement.y + rectangleHeight)
return placement
def tryFindBestPlacement(self, rectangleWidth, rectangleHeight):
"""Finds the best position for a rectangle of the given dimensions
rectangleWidth: Width of the rectangle to find a position for
rectangleHeight: Height of the rectangle to find a position for
Returns a Point instance if a valid placement for the rectangle could
be found, otherwise returns None"""
# Slice index, vertical position and score of the best placement we
# could find
bestSliceIndex = -1 # Slice index where the best placement was found
bestSliceY = 0 # Y position of the best placement found
# lower == better!
bestScore = self.packingAreaWidth * self.packingAreaHeight
# This is the counter for the currently checked position. The search
# works by skipping from slice to slice, determining the suitability
# of the location for the placement of the rectangle.
leftSliceIndex = 0
# Determine the slice in which the right end of the rectangle is located
rightSliceIndex = bisect_left(self.heightSlices, Point(rectangleWidth, 0))
if rightSliceIndex < 0:
rightSliceIndex = ~rightSliceIndex
while rightSliceIndex <= len(self.heightSlices):
# Determine the highest slice within the slices covered by the
# rectangle at its current placement. We cannot put the rectangle
# any lower than this without overlapping the other rectangles.
highest = self.heightSlices[leftSliceIndex].y
for index in xrange(leftSliceIndex + 1, rightSliceIndex):
if self.heightSlices[index].y > highest:
highest = self.heightSlices[index].y
# Only process this position if it doesn't leave the packing area
if highest + rectangleHeight < self.packingAreaHeight:
score = highest
if score < bestScore:
bestSliceIndex = leftSliceIndex
bestSliceY = highest
bestScore = score
# Advance the starting slice to the next slice start
leftSliceIndex += 1
if leftSliceIndex >= len(self.heightSlices):
break
# Advance the ending slice until we're on the proper slice again,
# given the new starting position of the rectangle.
rightRectangleEnd = self.heightSlices[leftSliceIndex].x + rectangleWidth
while rightSliceIndex <= len(self.heightSlices):
if rightSliceIndex == len(self.heightSlices):
rightSliceStart = self.packingAreaWidth
else:
rightSliceStart = self.heightSlices[rightSliceIndex].x
# Is this the slice we're looking for?
if rightSliceStart > rightRectangleEnd:
break
rightSliceIndex += 1
# If we crossed the end of the slice array, the rectangle's right
# end has left the packing area, and thus, our search ends.
if rightSliceIndex > len(self.heightSlices):
break
# Return the best placement we found for this rectangle. If the
# rectangle didn't fit anywhere, the slice index will still have its
# initialization value of -1 and we can report that no placement
# could be found.
if bestSliceIndex == -1:
return None
else:
return Point(self.heightSlices[bestSliceIndex].x, bestSliceY)
def integrateRectangle(self, left, width, bottom):
"""Integrates a new rectangle into the height slice table
left: Position of the rectangle's left side
width: Width of the rectangle
bottom: Position of the rectangle's lower side"""
# Find the first slice that is touched by the rectangle
startSlice = bisect_left(self.heightSlices, Point(left, 0))
# Did we score a direct hit on an existing slice start?
if startSlice >= 0:
# We scored a direct hit, so we can replace the slice we have hit
firstSliceOriginalHeight = self.heightSlices[startSlice].y
self.heightSlices[startSlice] = Point(left, bottom)
else: # No direct hit, slice starts inside another slice
# Add a new slice after the slice in which we start
startSlice = ~startSlice
firstSliceOriginalHeight = self.heightSlices[startSlice - 1].y
self.heightSlices.insert(startSlice, Point(left, bottom))
right = left + width
startSlice += 1
# Special case, the rectangle started on the last slice, so we cannot
# use the start slice + 1 for the binary search and the possibly
# already modified start slice height now only remains in our temporary
# firstSliceOriginalHeight variable
if startSlice >= len(self.heightSlices):
# If the slice ends within the last slice (usual case, unless it
# has the exact same width the packing area has), add another slice
# to return to the original height at the end of the rectangle.
if right < self.packingAreaWidth:
self.heightSlices.append(Point(right, firstSliceOriginalHeight))
else: # The rectangle doesn't start on the last slice
endSlice = bisect_left(self.heightSlices, Point(right,0), \
startSlice, len(self.heightSlices))
# Another direct hit on the final slice's end?
if endSlice > 0:
del self.heightSlices[startSlice:endSlice]
else: # No direct hit, rectangle ends inside another slice
# Make index from negative bisect_left() result
endSlice = ~endSlice
# Find out to which height we need to return at the right end of
# the rectangle
if endSlice == startSlice:
returnHeight = firstSliceOriginalHeight
else:
returnHeight = self.heightSlices[endSlice - 1].y
# Remove all slices covered by the rectangle and begin a new
# slice at its end to return back to the height of the slice on
# which the rectangle ends.
del self.heightSlices[startSlice:endSlice]
if right < self.packingAreaWidth:
self.heightSlices.insert(startSlice, Point(right, returnHeight))
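# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# The packer is driven entirely through Pack()/TryPack(): create a packer for the
# target area, then ask it for a position for each rectangle. TryPack() returns a
# Point, or None once a rectangle no longer fits; in this implementation Pack()
# simply forwards to TryPack(). The sizes below are made up for the demo: the
# first two rectangles land side by side on the bottom row and the third is
# placed on top of them.
def _demo_packing():
    packer = CygonRectanglePacker(256, 256)
    placements = []
    for width, height in [(100, 50), (100, 50), (200, 100)]:
        point = packer.TryPack(width, height)
        if point is None:
            break  # no room left for this rectangle
        placements.append((point.x, point.y, width, height))
    return placements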
| gpl-3.0 |
dhelbegor/omelette | project_name/settings/base.py | 1 | 3526 | """
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
DEFAULT_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
]
LOCAL_APPS = [
'apps.core',
]
INSTALLED_APPS = DEFAULT_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'database.db'),
}
}
# Password validation
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
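# --- Illustrative note (editor's addition, not part of the generated template) ---
# base.py deliberately ships with DEBUG = False and an empty ALLOWED_HOSTS; the
# usual pattern with a settings package like this one is to layer environment
# modules on top of it. A hypothetical settings/dev.py (not created by this
# template) could look like:
#
#   from .base import *  # noqa: F401,F403
#
#   DEBUG = True
#   ALLOWED_HOSTS = ['localhost', '127.0.0.1']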
| mit |
bohanapp/gaoyuan.org | node_modules/hexo-renderer-scss/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py | 2698 | 3270 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '&lt;test&gt;\'&quot;&#xD;&amp;&#xA;foo'
converted_apos = converted.replace("'", '&apos;')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
| mit |
dahlstrom-g/intellij-community | python/testData/refactoring/move/optimizeImportsAfterMoveInvalidatesMembersToBeMoved/after/src/src.py | 22 | 2358 | # -*- coding: utf-8 -*-
# (c) 2017 Tuomas Airaksinen
#
# This file is part of Serviceform.
#
# Serviceform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Serviceform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Serviceform. If not, see <http://www.gnu.org/licenses/>.
import datetime
import string
import logging
from enum import Enum
from typing import Tuple, Set, Optional, Sequence, Iterator, Iterable, TYPE_CHECKING
from colorful.fields import RGBColorField
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.db.models import Prefetch
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from guardian.shortcuts import get_users_with_perms
from select2 import fields as select2_fields
from serviceform.tasks.models import Task
from .. import emails, utils
from ..utils import ColorStr
from .mixins import CopyMixin
from .people import Participant, ResponsibilityPerson
from .email import EmailTemplate
from .participation import QuestionAnswer
if TYPE_CHECKING:
from .participation import ParticipationActivity, ParticipationActivityChoice
local_tz = timezone.get_default_timezone()
logger = logging.getLogger(__name__)
def imported_symbols_anchor():
print(RGBColorField, settings, GenericRelation, Prefetch, render_to_string, reverse, format_html,
get_users_with_perms, select2_fields, Task, emails, CopyMixin, Participant, ResponsibilityPerson,
EmailTemplate, QuestionAnswer, ParticipationActivity, ParticipationActivityChoice, datetime, Enum, string,
Tuple, Set, Optional, Sequence, Iterator, Iterable, _, cached_property, models, utils, ColorStr)
| apache-2.0 |
globau/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_treewalkers.py | 429 | 13692 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import unittest
import warnings
from difflib import unified_diff
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, convertExpected
from html5lib import html5parser, treewalkers, treebuilders, constants
def PullDOMAdapter(node):
from xml.dom import Node
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, COMMENT, CHARACTERS
if node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
for childNode in node.childNodes:
for event in PullDOMAdapter(childNode):
yield event
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise NotImplementedError("DOCTYPE nodes are not supported by PullDOM")
elif node.nodeType == Node.COMMENT_NODE:
yield COMMENT, node
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
yield CHARACTERS, node
elif node.nodeType == Node.ELEMENT_NODE:
yield START_ELEMENT, node
for childNode in node.childNodes:
for event in PullDOMAdapter(childNode):
yield event
yield END_ELEMENT, node
else:
raise NotImplementedError("Node type not supported: " + str(node.nodeType))
treeTypes = {
"DOM": {"builder": treebuilders.getTreeBuilder("dom"),
"walker": treewalkers.getTreeWalker("dom")},
"PullDOM": {"builder": treebuilders.getTreeBuilder("dom"),
"adapter": PullDOMAdapter,
"walker": treewalkers.getTreeWalker("pulldom")},
}
# Try whatever etree implementations are available from a list that are
#"supposed" to work
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
pass
else:
treeTypes['ElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
pass
else:
treeTypes['cElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
try:
import lxml.etree as ElementTree # flake8: noqa
except ImportError:
pass
else:
treeTypes['lxml_native'] = \
{"builder": treebuilders.getTreeBuilder("lxml"),
"walker": treewalkers.getTreeWalker("lxml")}
try:
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
except ImportError:
pass
else:
def GenshiAdapter(tree):
text = None
for token in treewalkers.getTreeWalker("dom")(tree):
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
if text is None:
text = token["data"]
else:
text += token["data"]
elif text is not None:
yield TEXT, text, (None, -1, -1)
text = None
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text is not None:
yield TEXT, text, (None, -1, -1)
treeTypes["genshi"] = \
{"builder": treebuilders.getTreeBuilder("dom"),
"adapter": GenshiAdapter,
"walker": treewalkers.getTreeWalker("genshi")}
def concatenateCharacterTokens(tokens):
charactersToken = None
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
if charactersToken is None:
charactersToken = {"type": "Characters", "data": token["data"]}
else:
charactersToken["data"] += token["data"]
else:
if charactersToken is not None:
yield charactersToken
charactersToken = None
yield token
if charactersToken is not None:
yield charactersToken
def convertTokens(tokens):
output = []
indent = 0
for token in concatenateCharacterTokens(tokens):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
if (token["namespace"] and
token["namespace"] != constants.namespaces["html"]):
if token["namespace"] in constants.prefixes:
name = constants.prefixes[token["namespace"]]
else:
name = token["namespace"]
name += " " + token["name"]
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
attrs = token["data"]
if attrs:
# TODO: Remove this if statement, attrs should always exist
for (namespace, name), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
outputname = constants.prefixes[namespace]
else:
outputname = namespace
outputname += " " + name
else:
outputname = name
output.append("%s%s=\"%s\"" % (" " * indent, outputname, value))
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent, token["name"],
token["publicId"],
token["systemId"] and token["systemId"] or ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent, token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type in ("Characters", "SpaceCharacters"):
output.append("%s\"%s\"" % (" " * indent, token["data"]))
else:
pass # TODO: what to do with errors?
return "\n".join(output)
import re
attrlist = re.compile(r"^(\s+)\w+=.*(\n\1\w+=.*)+", re.M)
def sortattrs(x):
lines = x.group(0).split("\n")
lines.sort()
return "\n".join(lines)
class TokenTestCase(unittest.TestCase):
def test_all_tokens(self):
expected = [
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'},
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
{'data': 'a', 'type': 'Characters'},
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
{'data': 'b', 'type': 'Characters'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
{'data': 'c', 'type': 'Characters'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'}
]
for treeName, treeCls in treeTypes.items():
p = html5parser.HTMLParser(tree=treeCls["builder"])
document = p.parse("<html><head></head><body>a<div>b</div>c</body></html>")
document = treeCls.get("adapter", lambda x: x)(document)
output = treeCls["walker"](document)
for expectedToken, outputToken in zip(expected, output):
self.assertEqual(expectedToken, outputToken)
def runTreewalkerTest(innerHTML, input, expected, errors, treeClass):
warnings.resetwarnings()
warnings.simplefilter("error")
try:
p = html5parser.HTMLParser(tree=treeClass["builder"])
if innerHTML:
document = p.parseFragment(input, innerHTML)
else:
document = p.parse(input)
except constants.DataLossWarning:
# Ignore testcases we know we don't pass
return
document = treeClass.get("adapter", lambda x: x)(document)
try:
output = convertTokens(treeClass["walker"](document))
output = attrlist.sub(sortattrs, output)
expected = attrlist.sub(sortattrs, convertExpected(expected))
diff = "".join(unified_diff([line + "\n" for line in expected.splitlines()],
[line + "\n" for line in output.splitlines()],
"Expected", "Received"))
assert expected == output, "\n".join([
"", "Input:", input,
"", "Expected:", expected,
"", "Received:", output,
"", "Diff:", diff,
])
except NotImplementedError:
pass # Amnesty for those that confess...
def test_treewalker():
sys.stdout.write('Testing tree walkers ' + " ".join(list(treeTypes.keys())) + "\n")
for treeName, treeCls in treeTypes.items():
files = get_data_files('tree-construction')
for filename in files:
testName = os.path.basename(filename).replace(".dat", "")
if testName in ("template",):
continue
tests = TestData(filename, "data")
for index, test in enumerate(tests):
(input, errors,
innerHTML, expected) = [test[key] for key in ("data", "errors",
"document-fragment",
"document")]
errors = errors.split("\n")
yield runTreewalkerTest, innerHTML, input, expected, errors, treeCls
def set_attribute_on_first_child(docfrag, name, value, treeName):
"""naively sets an attribute on the first child of the document
fragment passed in"""
setter = {'ElementTree': lambda d: d[0].set,
'DOM': lambda d: d.firstChild.setAttribute}
setter['cElementTree'] = setter['ElementTree']
try:
setter.get(treeName, setter['DOM'])(docfrag)(name, value)
except AttributeError:
setter['ElementTree'](docfrag)(name, value)
def runTreewalkerEditTest(intext, expected, attrs_to_add, tree):
"""tests what happens when we add attributes to the intext"""
treeName, treeClass = tree
parser = html5parser.HTMLParser(tree=treeClass["builder"])
document = parser.parseFragment(intext)
for nom, val in attrs_to_add:
set_attribute_on_first_child(document, nom, val, treeName)
document = treeClass.get("adapter", lambda x: x)(document)
output = convertTokens(treeClass["walker"](document))
output = attrlist.sub(sortattrs, output)
if not output in expected:
raise AssertionError("TreewalkerEditTest: %s\nExpected:\n%s\nReceived:\n%s" % (treeName, expected, output))
def test_treewalker_six_mix():
"""Str/Unicode mix. If str attrs added to tree"""
# On Python 2.x string literals are of type str. Unless, like this
# file, the programmer imports unicode_literals from __future__.
# In that case, string literals become objects of type unicode.
# This test simulates a Py2 user, modifying attributes on a document
# fragment but not using the u'' syntax nor importing unicode_literals
sm_tests = [
('<a href="http://example.com">Example</a>',
[(str('class'), str('test123'))],
'<a>\n class="test123"\n href="http://example.com"\n "Example"'),
('<link href="http://example.com/cow">',
[(str('rel'), str('alternate'))],
'<link>\n href="http://example.com/cow"\n rel="alternate"\n "Example"')
]
for tree in treeTypes.items():
for intext, attrs, expected in sm_tests:
yield runTreewalkerEditTest, intext, expected, attrs, tree
| mpl-2.0 |
lsinfo/odoo | addons/calendar/contacts.py | 389 | 1414 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class calendar_contacts(osv.osv):
_name = 'calendar.contacts'
_columns = {
'user_id': fields.many2one('res.users','Me'),
'partner_id': fields.many2one('res.partner','Employee',required=True, domain=[]),
'active':fields.boolean('active'),
}
_defaults = {
'user_id': lambda self, cr, uid, ctx: uid,
'active' : True,
} | agpl-3.0 |
TeamExodus/external_chromium_org | tools/memory_inspector/memory_inspector/data/serialization.py | 89 | 4465 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module handles the JSON de/serialization of the core classes.
This is needed for both long term storage (e.g., loading/storing traces to local
files) and for short term data exchange (AJAX with the HTML UI).
The rationale of these serializers is to store data in an efficient format
(i.e. avoiding redundant information) that is also intelligible (i.e. flattening
the class hierarchy and keeping only the meaningful bits).
"""
import json
from memory_inspector.classification import results
from memory_inspector.core import backends
from memory_inspector.core import memory_map
from memory_inspector.core import native_heap
from memory_inspector.core import stacktrace
from memory_inspector.core import symbol
class Encoder(json.JSONEncoder):
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, memory_map.Map):
return [entry.__dict__ for entry in obj.entries]
if isinstance(obj, symbol.Symbols):
return obj.symbols
if isinstance(obj, (symbol.Symbol, symbol.SourceInfo)):
return obj.__dict__
if isinstance(obj, native_heap.NativeHeap):
# Just keep the list of (distinct) stack frames from the index. Encoding
# it as a JSON dictionary would be redundant.
return {'stack_frames': obj.stack_frames.values(),
'allocations': obj.allocations}
if isinstance(obj, native_heap.Allocation):
return obj.__dict__
if isinstance(obj, stacktrace.Stacktrace):
# Keep just absolute addrs of stack frames. The full frame details will be
# kept in (and rebuilt from) |native_heap.NativeHeap.stack_frames|. See
# NativeHeapDecoder below.
return [frame.address for frame in obj.frames]
if isinstance(obj, stacktrace.Frame):
# Strip out the symbol information from stack frames. Symbols are stored
# (and will be loaded) separately. Rationale: different heap snapshots can
# share the same symbol db. Serializing the symbol information for each
# stack frame for each heap snapshot is a waste.
return {'address': obj.address,
'exec_file_rel_path': obj.exec_file_rel_path,
'offset': obj.offset}
if isinstance(obj, (backends.DeviceStats, backends.ProcessStats)):
return obj.__dict__
if isinstance(obj, results.AggreatedResults):
return {'keys': obj.keys, 'buckets': obj.total}
if isinstance(obj, results.Bucket):
return {obj.rule.name : {'values': obj.values, 'children': obj.children}}
return json.JSONEncoder.default(self, obj)
class MmapDecoder(json.JSONDecoder):
def decode(self, json_str): # pylint: disable=W0221
d = super(MmapDecoder, self).decode(json_str)
mmap = memory_map.Map()
for entry_dict in d:
entry = memory_map.MapEntry(**entry_dict)
mmap.Add(entry)
return mmap
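# Illustrative round-trip (a sketch, not used by the module itself): the
# encoder/decoder classes in this file plug into the standard json module.
# |mmap_obj| is assumed to be a memory_map.Map instance.
def _ExampleMmapRoundtrip(mmap_obj):
  json_str = json.dumps(mmap_obj, cls=Encoder)  # Map -> JSON list of entries.
  return MmapDecoder().decode(json_str)  # JSON -> equivalent memory_map.Map.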
class SymbolsDecoder(json.JSONDecoder):
def decode(self, json_str): # pylint: disable=W0221
d = super(SymbolsDecoder, self).decode(json_str)
symbols = symbol.Symbols()
for sym_key, sym_dict in d.iteritems():
sym = symbol.Symbol(sym_dict['name'])
for source_info in sym_dict['source_info']:
sym.AddSourceLineInfo(**source_info)
symbols.symbols[sym_key] = sym
return symbols
class NativeHeapDecoder(json.JSONDecoder):
def decode(self, json_str): # pylint: disable=W0221
d = super(NativeHeapDecoder, self).decode(json_str)
nh = native_heap.NativeHeap()
# First load and rebuild the stack_frame index.
for frame_dict in d['stack_frames']:
frame = nh.GetStackFrame(frame_dict['address'])
frame.SetExecFileInfo(frame_dict['exec_file_rel_path'],
frame_dict['offset'])
# Then load backtraces (reusing stack frames from the index above).
for alloc_dict in d['allocations']:
stack_trace = stacktrace.Stacktrace()
for absolute_addr in alloc_dict['stack_trace']:
stack_trace.Add(nh.GetStackFrame(absolute_addr))
nh.Add(native_heap.Allocation(start=alloc_dict['start'],
size=alloc_dict['size'],
stack_trace=stack_trace,
flags=alloc_dict['flags'],
resident_size=alloc_dict['resident_size']))
return nh | bsd-3-clause |
ToonTownInfiniteRepo/ToontownInfinite | Panda3D-1.9.0/python/Lib/distutils/command/check.py | 98 | 5557 | """distutils.command.check
Implements the Distutils 'check' command.
"""
__revision__ = "$Id$"
from distutils.core import Command
from distutils.dist import PKG_INFO_ENCODING
from distutils.errors import DistutilsSetupError
try:
# docutils is installed
from docutils.utils import Reporter
from docutils.parsers.rst import Parser
from docutils import frontend
from docutils import nodes
from StringIO import StringIO
class SilentReporter(Reporter):
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding='ascii', error_handler='replace'):
self.messages = []
Reporter.__init__(self, source, report_level, halt_level, stream,
debug, encoding, error_handler)
def system_message(self, level, message, *children, **kwargs):
self.messages.append((level, message, children, kwargs))
return nodes.system_message(message, level=level,
type=self.levels[level],
*children, **kwargs)
HAS_DOCUTILS = True
except ImportError:
# docutils is not installed
HAS_DOCUTILS = False
class check(Command):
"""This command checks the meta-data of the package.
"""
description = ("perform some checks on the package")
user_options = [('metadata', 'm', 'Verify meta-data'),
('restructuredtext', 'r',
('Checks if long string meta-data syntax '
                      'is reStructuredText-compliant')),
('strict', 's',
'Will exit with an error if a check fails')]
boolean_options = ['metadata', 'restructuredtext', 'strict']
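    # Typical invocation (a sketch; the flags correspond to the options
    # declared above):
    #   python setup.py check --metadata --restructuredtext --strict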
def initialize_options(self):
"""Sets default values for options."""
self.restructuredtext = 0
self.metadata = 1
self.strict = 0
self._warnings = 0
def finalize_options(self):
pass
def warn(self, msg):
"""Counts the number of warnings that occurs."""
self._warnings += 1
return Command.warn(self, msg)
def run(self):
"""Runs the command."""
# perform the various tests
if self.metadata:
self.check_metadata()
if self.restructuredtext:
if HAS_DOCUTILS:
self.check_restructuredtext()
elif self.strict:
raise DistutilsSetupError('The docutils package is needed.')
# let's raise an error in strict mode, if we have at least
# one warning
if self.strict and self._warnings > 0:
raise DistutilsSetupError('Please correct your package.')
def check_metadata(self):
"""Ensures that all required elements of meta-data are supplied.
name, version, URL, (author and author_email) or
        (maintainer and maintainer_email).
Warns if any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: %s" % ', '.join(missing))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
def check_restructuredtext(self):
"""Checks if the long string fields are reST-compliant."""
data = self.distribution.get_long_description()
if not isinstance(data, unicode):
data = data.decode(PKG_INFO_ENCODING)
for warning in self._check_rst_data(data):
line = warning[-1].get('line')
if line is None:
warning = warning[1]
else:
warning = '%s (line %s)' % (warning[1], line)
self.warn(warning)
def _check_rst_data(self, data):
"""Returns warnings when the provided data doesn't compile."""
source_path = StringIO()
parser = Parser()
settings = frontend.OptionParser().get_default_values()
settings.tab_width = 4
settings.pep_references = None
settings.rfc_references = None
reporter = SilentReporter(source_path,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
try:
parser.parse(data, document)
except AttributeError:
reporter.messages.append((-1, 'Could not finish the parsing.',
'', {}))
return reporter.messages
| mit |
thaim/ansible | lib/ansible/modules/network/fortios/fortios_system_replacemsg_icap.py | 13 | 10063 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_replacemsg_icap
short_description: Replacement messages in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify the system_replacemsg feature and icap category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
            - Ensures the FortiGate certificate is verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_replacemsg_icap:
description:
- Replacement messages.
default: null
type: dict
suboptions:
buffer:
description:
- Message string.
type: str
format:
description:
- Format flag.
type: str
choices:
- none
- text
- html
- wml
header:
description:
- Header flag.
type: str
choices:
- none
- http
- 8bit
msg_type:
description:
- Message type.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Replacement messages.
fortios_system_replacemsg_icap:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_replacemsg_icap:
buffer: "<your_own_value>"
format: "none"
header: "none"
msg_type: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_replacemsg_icap_data(json):
option_list = ['buffer', 'format', 'header',
'msg_type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
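# Illustrative example (hypothetical values): underscore_to_hyphen(
# {'msg_type': 'x', 'header': 'none'}) returns {'msg-type': 'x', 'header':
# 'none'}; nested dicts and lists are converted recursively.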
def system_replacemsg_icap(data, fos):
vdom = data['vdom']
state = data['state']
system_replacemsg_icap_data = data['system_replacemsg_icap']
filtered_data = underscore_to_hyphen(filter_system_replacemsg_icap_data(system_replacemsg_icap_data))
if state == "present":
return fos.set('system.replacemsg',
'icap',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system.replacemsg',
'icap',
mkey=filtered_data['msg-type'],
vdom=vdom)
def is_successful_status(status):
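    # A DELETE that comes back with HTTP 404 is also treated as success, since
    # the object being removed is already absent (idempotent delete).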
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system_replacemsg(data, fos):
if data['system_replacemsg_icap']:
resp = system_replacemsg_icap(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_replacemsg_icap": {
"required": False, "type": "dict", "default": None,
"options": {
"buffer": {"required": False, "type": "str"},
"format": {"required": False, "type": "str",
"choices": ["none", "text", "html",
"wml"]},
"header": {"required": False, "type": "str",
"choices": ["none", "http", "8bit"]},
"msg_type": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| mit |
mydongistiny/external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver.py | 21 | 4523 | # Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.layout_tests.port import driver
import time
import shutil
class BrowserTestDriver(driver.Driver):
"""Object for running print preview test(s) using browser_tests."""
def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
"""Invokes the constructor of driver.Driver."""
super(BrowserTestDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
def start(self, pixel_tests, per_test_args, deadline):
"""Same as Driver.start() however, it has an extra step. It waits for
a path to a file to be used for stdin to be printed by the browser test.
If a path is found by the deadline test test will open the file and
assign it to the stdin of the process that is owned by this driver's
server process.
"""
# FIXME(ivandavid): Need to handle case where the layout test doesn't
# get a file name.
new_cmd_line = self.cmd_line(pixel_tests, per_test_args)
if not self._server_process or new_cmd_line != self._current_cmd_line:
self._start(pixel_tests, per_test_args)
self._run_post_start_tasks()
self._open_stdin_path(deadline)
# Gets the path of the directory that the file for stdin communication is
# in. Since the browser test cannot clean it up, the layout test framework
# will. Everything the browser test uses is stored in the same directory as
# the stdin file, so deleting that directory recursively will remove all the
# other temp data, like the printed pdf. This function assumes the correct
# file path is sent. It won't delete files with only one component to avoid
# accidentally deleting files like /tmp.
def _open_stdin_path(self, deadline, test=False):
# FIXME(ivandavid): Come up with a way to test & see what happens when
# the file can't be opened.
path, found = self._read_stdin_path(deadline)
if found:
if test == False:
self._server_process._proc.stdin = open(path, 'wb', 0)
def _read_stdin_path(self, deadline):
# return (stdin_path, bool)
block = self._read_block(deadline)
if block.stdin_path:
return (block.stdin_path, True)
return (None, False)
def cmd_line(self, pixel_tests, per_test_args):
"""Command line arguments to run the browser test."""
cmd = self._command_wrapper(self._port.get_option('wrapper'))
cmd.append(self._port._path_to_driver())
cmd.append('--gtest_filter=PrintPreviewPdfGeneratedBrowserTest.MANUAL_LayoutTestDriver')
cmd.append('--run-manual')
cmd.append('--single_process')
cmd.extend(per_test_args)
cmd.extend(self._port.get_option('additional_drt_flag', []))
return cmd
def stop(self):
if self._server_process:
self._server_process.write('QUIT')
super(BrowserTestDriver, self).stop(self._port.driver_stop_timeout())
| bsd-3-clause |
amyliu345/zulip | zerver/views/events_register.py | 10 | 2542 | from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from typing import Text
from typing import Iterable, Optional, Sequence
from zerver.lib.actions import do_events_register
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import check_string, check_list, check_bool
from zerver.models import UserProfile
def _default_all_public_streams(user_profile, all_public_streams):
# type: (UserProfile, Optional[bool]) -> bool
if all_public_streams is not None:
return all_public_streams
else:
return user_profile.default_all_public_streams
def _default_narrow(user_profile, narrow):
# type: (UserProfile, Iterable[Sequence[Text]]) -> Iterable[Sequence[Text]]
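    # For example (hypothetical stream name): if the user's default events
    # register stream is "errors" and the caller passes an empty narrow, the
    # effective narrow becomes [['stream', 'errors']].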
default_stream = user_profile.default_events_register_stream
if not narrow and user_profile.default_events_register_stream is not None:
narrow = [['stream', default_stream.name]]
return narrow
# Does not need to be authenticated because it's called from rest_dispatch
@has_request_variables
def api_events_register(request, user_profile,
apply_markdown=REQ(default=False, validator=check_bool),
all_public_streams=REQ(default=None, validator=check_bool)):
# type: (HttpRequest, UserProfile, bool, Optional[bool]) -> HttpResponse
return events_register_backend(request, user_profile,
apply_markdown=apply_markdown,
all_public_streams=all_public_streams)
@has_request_variables
def events_register_backend(request, user_profile, apply_markdown=True,
all_public_streams=None,
event_types=REQ(validator=check_list(check_string), default=None),
narrow=REQ(validator=check_list(check_list(check_string, length=2)), default=[]),
queue_lifespan_secs=REQ(converter=int, default=0)):
# type: (HttpRequest, UserProfile, bool, Optional[bool], Optional[Iterable[str]], Iterable[Sequence[Text]], int) -> HttpResponse
all_public_streams = _default_all_public_streams(user_profile, all_public_streams)
narrow = _default_narrow(user_profile, narrow)
ret = do_events_register(user_profile, request.client, apply_markdown,
event_types, queue_lifespan_secs, all_public_streams,
narrow=narrow)
return json_success(ret)
| apache-2.0 |
pbrazdil/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/gc.py | 146 | 2038 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from model.queuestatus import QueueStatus
class GC(webapp.RequestHandler):
def get(self):
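        # Statuses are ordered newest-first. Rows that reference an active
        # patch or bug are always kept; otherwise only the most recent row for
        # each queue survives and older ones are deleted.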
statuses = QueueStatus.all().order("-date")
seen_queues = set()
for status in statuses:
if status.active_patch_id or status.active_bug_id:
continue
if status.queue_name in seen_queues:
status.delete()
seen_queues.add(status.queue_name)
self.response.out.write("Done!")
| bsd-3-clause |
benlangmuir/swift | utils/type-layout-fuzzer.py | 32 | 4228 | #!/usr/bin/env python
# This script outputs a Swift source with randomly-generated type definitions,
# which can be used for ABI or layout algorithm fuzzing.
# TODO: generate types with generics, existentials, compositions
from __future__ import print_function
import random
import sys
maxDepth = 5
maxMembers = 5
typesDefined = []
classesDefined = []
nextToDefine = 0
objcInterop = False
if len(sys.argv) >= 2:
if sys.argv[1] == "--objc":
objcInterop = True
if sys.argv[1] == "--help":
print("Usage: " + sys.argv[0] + " [--objc]", file=sys.stderr)
print("", file=sys.stderr)
print(" --objc Include ObjC-interop types", file=sys.stderr)
sys.exit(2)
random.seed()
if objcInterop:
print("import Foundation")
print()
def randomTypeList(depth):
count = random.randint(0, maxMembers)
result = "("
for i in xrange(count):
if i > 0:
result += ", "
result += randomTypeReference(depth + 1)
result += ")"
return result
def randomTypeReference(depth):
def nominal():
global typesDefined
allowNew = depth < maxDepth
bound = len(classesDefined) if allowNew else len(classesDefined) - 1
which = random.randint(0, bound)
if which < len(classesDefined):
return classesDefined[which]
newName = "T" + str(len(typesDefined))
def defineRandomRelatedType(name):
defineRandomNominalType(name, depth)
typesDefined.append((newName, defineRandomRelatedType))
return newName
def tuple():
return randomTypeList(depth + 1)
def metatype():
return "(" + randomTypeReference(depth + 1) + ").Type"
def leaf():
leaves = ["Int", "String", "Int8", "Int16", "Int32", "Int64",
"(() -> ())", "(@convention(c) () -> ())", "AnyObject"]
if objcInterop:
leaves += ["NSObject", "(@convention(block) () -> ())"]
return random.choice(leaves)
if depth < maxDepth:
kinds = [nominal, tuple, metatype, leaf, leaf, leaf, leaf, leaf]
else:
kinds = [leaf]
return random.choice(kinds)()
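# For illustration: depending on depth, randomTypeReference() can yield a leaf
# such as "Int64", a nominal reference such as "T3", a tuple such as
# "(Int, (() -> ()))", or a metatype such as "(String).Type".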
def defineRandomFields(depth, basename):
numMembers = random.randint(0, maxMembers)
for i in xrange(numMembers):
print(" var " + basename + str(i) + ": " +
randomTypeReference(depth + 1))
def defineRandomClass(name, depth):
global classesDefined
classesDefined.append(name)
print("class " + name, end="")
def inheritNSObject():
print(": NSObject", end="")
def inheritsOtherClass():
print(": ", end="")
name = "T" + str(len(typesDefined))
def defineRandomBaseClass(name):
defineRandomClass(name, depth)
typesDefined.append((name, defineRandomBaseClass))
print(name, end="")
def inheritsNothing():
pass
inheritances = [inheritsNothing]
if depth == 0:
# The contents of classes are interesting only for top-level type
inheritances += [inheritsOtherClass]
if objcInterop:
inheritances += [inheritNSObject]
random.choice(inheritances)()
print(" {")
# Prevent errors about lack of initializers
print(" init(" + name + ": ()) { fatalError() }")
# The contents of classes are interesting only for top-level type
if depth == 0:
defineRandomFields(depth, "x" + name)
print("}")
print()
def defineRandomNominalType(name, depth=0):
def struct():
print("struct " + name + " {")
defineRandomFields(depth, "x")
print("}")
print()
def clas():
defineRandomClass(name, depth)
def enum():
# TODO: indirect cases
print("enum " + name + " {")
numCases = random.randint(0, maxMembers)
for i in xrange(numCases):
print(" case x" + str(i) + randomTypeList(depth + 1))
print("}")
print()
kinds = [struct, clas, enum]
return random.choice(kinds)()
typesDefined.append(("Generated", defineRandomNominalType))
while nextToDefine < len(typesDefined):
name, definer = typesDefined[nextToDefine]
definer(name)
nextToDefine += 1
| apache-2.0 |
Bismarrck/tensorflow | tensorflow/python/ops/linalg/linear_operator.py | 3 | 33838 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for linear operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import numpy as np
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperator"]
# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.
@tf_export("linalg.LinearOperator")
@six.add_metaclass(abc.ABCMeta)
class LinearOperator(object):
"""Base class defining a [batch of] linear operator[s].
Subclasses of `LinearOperator` provide access to common methods on a
(batch) matrix, without the need to materialize the matrix. This allows:
* Matrix free computations
* Operators that take advantage of special structure, while providing a
consistent API to users.
#### Subclassing
To enable a public method, subclasses should implement the leading-underscore
version of the method. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable
`matmul(x, adjoint=False, name="matmul")` a subclass should implement
`_matmul(x, adjoint=False)`.
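  For illustration only, a simplified sketch of a dense-matrix backed subclass
  (not an operator shipped in this module) might look like:
  ```python
  class LinOpFullMatrixSketch(LinearOperator):
    # Wraps a dense [batch] matrix `m`; illustrative sketch only.
    def __init__(self, m):
      self._m = m
      super(LinOpFullMatrixSketch, self).__init__(
          dtype=m.dtype, graph_parents=[m])
    def _shape(self):
      return self._m.get_shape()
    def _shape_tensor(self):
      return array_ops.shape(self._m)
    def _matmul(self, x, adjoint=False, adjoint_arg=False):
      return math_ops.matmul(
          self._m, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
  ```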
#### Performance contract
Subclasses should only implement the assert methods
(e.g. `assert_non_singular`) if they can be done in less than `O(N^3)`
time.
Class docstrings should contain an explanation of computational complexity.
Since this is a high-performance library, attention should be paid to detail,
and explanations can include constants as well as Big-O notation.
#### Shape compatibility
`LinearOperator` subclasses should operate on a [batch] matrix with
compatible shape. Class docstrings should define what is meant by compatible
shape. Some subclasses may not support batching.
Examples:
`x` is a batch matrix with compatible shape for `matmul` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
x.shape = [B1,...,Bb] + [N, R]
```
`rhs` is a batch matrix with compatible shape for `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
rhs.shape = [B1,...,Bb] + [M, R]
```
#### Example docstring for subclasses.
This operator acts like a (batch) matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `m x n` matrix. Again, this matrix `A` may not be materialized, but for
purposes of identifying and working with compatible arguments the shape is
relevant.
Examples:
```python
some_tensor = ... shape = ????
operator = MyLinOp(some_tensor)
operator.shape()
==> [2, 4, 4]
operator.log_abs_determinant()
==> Shape [2] Tensor
x = ... Shape [2, 4, 5] Tensor
operator.matmul(x)
==> Shape [2, 4, 5] Tensor
```
#### Shape compatibility
This operator acts on batch matrices with compatible shape.
FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE
#### Performance
FILL THIS IN
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
dtype,
graph_parents=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize the `LinearOperator`.
**This is a private method for subclass use.**
**Subclasses should copy-paste this `__init__` documentation.**
Args:
dtype: The type of the this `LinearOperator`. Arguments to `matmul` and
`solve` will have to be this type.
graph_parents: Python list of graph prerequisites of this `LinearOperator`
Typically tensors that are passed during initialization.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `dtype` is real, this is equivalent to being symmetric.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If any member of graph_parents is `None` or not a `Tensor`.
ValueError: If hints are set incorrectly.
"""
# Check and auto-set flags.
if is_positive_definite:
if is_non_singular is False:
raise ValueError("A positive definite matrix is always non-singular.")
is_non_singular = True
if is_non_singular:
if is_square is False:
raise ValueError("A non-singular matrix is always square.")
is_square = True
if is_self_adjoint:
if is_square is False:
raise ValueError("A self-adjoint matrix is always square.")
is_square = True
self._is_square_set_or_implied_by_hints = is_square
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._graph_parents = graph_parents
self._is_non_singular = is_non_singular
self._is_self_adjoint = is_self_adjoint
self._is_positive_definite = is_positive_definite
self._name = name or type(self).__name__
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(
name, values=((values or []) + self._graph_parents)) as scope:
yield scope
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `LinearOperator`."""
return self._dtype
@property
def name(self):
"""Name prepended to all ops created by this `LinearOperator`."""
return self._name
@property
def graph_parents(self):
"""List of graph dependencies of this `LinearOperator`."""
return self._graph_parents
@property
def is_non_singular(self):
return self._is_non_singular
@property
def is_self_adjoint(self):
return self._is_self_adjoint
@property
def is_positive_definite(self):
return self._is_positive_definite
@property
def is_square(self):
"""Return `True/False` depending on if this operator is square."""
# Static checks done after __init__. Why? Because domain/range dimension
# sometimes requires lots of work done in the derived class after init.
auto_square_check = self.domain_dimension == self.range_dimension
if self._is_square_set_or_implied_by_hints is False and auto_square_check:
raise ValueError(
"User set is_square hint to False, but the operator was square.")
if self._is_square_set_or_implied_by_hints is None:
return auto_square_check
return self._is_square_set_or_implied_by_hints
@abc.abstractmethod
def _shape(self):
# Write this in derived class to enable all static shape methods.
raise NotImplementedError("_shape is not implemented.")
@property
def shape(self):
"""`TensorShape` of this `LinearOperator`.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns
`TensorShape([B1,...,Bb, M, N])`, equivalent to `A.get_shape()`.
Returns:
`TensorShape`, statically determined, may be undefined.
"""
return self._shape()
@abc.abstractmethod
def _shape_tensor(self):
raise NotImplementedError("_shape_tensor is not implemented.")
def shape_tensor(self, name="shape_tensor"):
"""Shape of this `LinearOperator`, determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
`[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.shape.is_fully_defined():
return linear_operator_util.shape_tensor(self.shape.as_list())
else:
return self._shape_tensor()
@property
def batch_shape(self):
"""`TensorShape` of batch dimensions of this `LinearOperator`.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns
`TensorShape([B1,...,Bb])`, equivalent to `A.get_shape()[:-2]`
Returns:
`TensorShape`, statically determined, may be undefined.
"""
# Derived classes get this "for free" once .shape is implemented.
return self.shape[:-2]
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of batch dimensions of this operator, determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
`[B1,...,Bb]`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.batch_shape.is_fully_defined():
return linear_operator_util.shape_tensor(
self.batch_shape.as_list(), name="batch_shape")
else:
return self.shape_tensor()[:-2]
@property
def tensor_rank(self, name="tensor_rank"):
"""Rank (in the sense of tensors) of matrix corresponding to this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
Args:
name: A name for this `Op`.
Returns:
Python integer, or None if the tensor rank is undefined.
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
return self.shape.ndims
def tensor_rank_tensor(self, name="tensor_rank_tensor"):
"""Rank (in the sense of tensors) of matrix corresponding to this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`, determined at runtime.
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.tensor_rank is not None:
return ops.convert_to_tensor(self.tensor_rank)
else:
return array_ops.size(self.shape_tensor())
@property
def domain_dimension(self):
"""Dimension (in the sense of vector spaces) of the domain of this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
Returns:
`Dimension` object.
"""
# Derived classes get this "for free" once .shape is implemented.
if self.shape.rank is None:
return tensor_shape.Dimension(None)
else:
return self.shape.dims[-1]
def domain_dimension_tensor(self, name="domain_dimension_tensor"):
"""Dimension (in the sense of vector spaces) of the domain of this operator.
Determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
dim_value = tensor_shape.dimension_value(self.domain_dimension)
if dim_value is not None:
return ops.convert_to_tensor(dim_value)
else:
return self.shape_tensor()[-1]
@property
def range_dimension(self):
"""Dimension (in the sense of vector spaces) of the range of this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
Returns:
`Dimension` object.
"""
# Derived classes get this "for free" once .shape is implemented.
if self.shape.dims:
return self.shape.dims[-2]
else:
return tensor_shape.Dimension(None)
def range_dimension_tensor(self, name="range_dimension_tensor"):
"""Dimension (in the sense of vector spaces) of the range of this operator.
Determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
dim_value = tensor_shape.dimension_value(self.range_dimension)
if dim_value is not None:
return ops.convert_to_tensor(dim_value)
else:
return self.shape_tensor()[-2]
def _assert_non_singular(self):
"""Private default implementation of _assert_non_singular."""
logging.warn(
"Using (possibly slow) default implementation of assert_non_singular."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
return self.assert_positive_definite()
else:
singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)
# TODO(langmore) Add .eig and .cond as methods.
cond = (math_ops.reduce_max(singular_values, axis=-1) /
math_ops.reduce_min(singular_values, axis=-1))
return check_ops.assert_less(
cond,
self._max_condition_number_to_be_non_singular(),
message="Singular matrix up to precision epsilon.")
def _max_condition_number_to_be_non_singular(self):
"""Return the maximum condition number that we consider nonsingular."""
with ops.name_scope("max_nonsingular_condition_number"):
dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps
eps = math_ops.cast(
math_ops.reduce_max([
100.,
math_ops.cast(self.range_dimension_tensor(), self.dtype),
math_ops.cast(self.domain_dimension_tensor(), self.dtype)
]), self.dtype) * dtype_eps
return 1. / eps
def assert_non_singular(self, name="assert_non_singular"):
"""Returns an `Op` that asserts this operator is non singular.
This operator is considered non-singular if
```
ConditionNumber < max{100, range_dimension, domain_dimension} * eps,
eps := np.finfo(self.dtype.as_numpy_dtype).eps
```
Args:
name: A string name to prepend to created ops.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is singular.
"""
with self._name_scope(name):
return self._assert_non_singular()
def _assert_positive_definite(self):
"""Default implementation of _assert_positive_definite."""
logging.warn(
"Using (possibly slow) default implementation of "
"assert_positive_definite."
" Requires conversion to a dense matrix and O(N^3) operations.")
# If the operator is self-adjoint, then checking that
# Cholesky decomposition succeeds + results in positive diag is necessary
# and sufficient.
if self.is_self_adjoint:
return check_ops.assert_positive(
array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())),
message="Matrix was not positive definite.")
# We have no generic check for positive definite.
raise NotImplementedError("assert_positive_definite is not implemented.")
def assert_positive_definite(self, name="assert_positive_definite"):
"""Returns an `Op` that asserts this operator is positive definite.
Here, positive definite means that the quadratic form `x^H A x` has positive
real part for all nonzero `x`. Note that we do not require the operator to
be self-adjoint to be positive definite.
Args:
name: A name to give this `Op`.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is not positive definite.
"""
with self._name_scope(name):
return self._assert_positive_definite()
def _assert_self_adjoint(self):
dense = self.to_dense()
logging.warn(
"Using (possibly slow) default implementation of assert_self_adjoint."
" Requires conversion to a dense matrix.")
return check_ops.assert_equal(
dense,
linalg.adjoint(dense),
message="Matrix was not equal to its adjoint.")
def assert_self_adjoint(self, name="assert_self_adjoint"):
"""Returns an `Op` that asserts this operator is self-adjoint.
Here we check that this operator is *exactly* equal to its hermitian
transpose.
Args:
name: A string name to prepend to created ops.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is not self-adjoint.
"""
with self._name_scope(name):
return self._assert_self_adjoint()
def _check_input_dtype(self, arg):
"""Check that arg.dtype == self.dtype."""
if arg.dtype != self.dtype:
raise TypeError(
"Expected argument to have dtype %s. Found: %s in tensor %s" %
(self.dtype, arg.dtype, arg))
@abc.abstractmethod
def _matmul(self, x, adjoint=False, adjoint_arg=False):
raise NotImplementedError("_matmul is not implemented.")
def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
"""Transform [batch] matrix `x` with left multiplication: `x --> Ax`.
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
X = ... # shape [..., N, R], batch matrix, R > 0.
Y = operator.matmul(X)
Y.shape
==> [..., M, R]
Y[..., :, r] = sum_j A[..., :, j] X[j, r]
```
Args:
x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as
`self`. See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
the hermitian transpose (transposition and complex conjugation).
name: A name for this `Op`.
Returns:
A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
as `self`.
"""
if isinstance(x, LinearOperator):
if adjoint or adjoint_arg:
raise ValueError(".matmul not supported with adjoints.")
if (x.range_dimension is not None and
self.domain_dimension is not None and
x.range_dimension != self.domain_dimension):
raise ValueError(
"Operators are incompatible. Expected `x` to have dimension"
" {} but got {}.".format(self.domain_dimension, x.range_dimension))
with self._name_scope(name):
return linear_operator_algebra.matmul(self, x)
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
arg_dim = -1 if adjoint_arg else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
x.get_shape()[arg_dim])
return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _matvec(self, x, adjoint=False):
x_mat = array_ops.expand_dims(x, axis=-1)
y_mat = self.matmul(x_mat, adjoint=adjoint)
return array_ops.squeeze(y_mat, axis=-1)
def matvec(self, x, adjoint=False, name="matvec"):
"""Transform [batch] vector `x` with left multiplication: `x --> Ax`.
```python
    # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
X = ... # shape [..., N], batch vector
Y = operator.matvec(X)
Y.shape
==> [..., M]
Y[..., :] = sum_j A[..., :, j] X[..., j]
```
Args:
x: `Tensor` with compatible shape and same `dtype` as `self`.
`x` is treated as a [batch] vector meaning for every set of leading
dimensions, the last dimension defines a vector.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
name: A name for this `Op`.
Returns:
A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(x.get_shape()[-1])
return self._matvec(x, adjoint=adjoint)
def _determinant(self):
logging.warn(
"Using (possibly slow) default implementation of determinant."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
return math_ops.exp(self.log_abs_determinant())
return linalg_ops.matrix_determinant(self.to_dense())
def determinant(self, name="det"):
"""Determinant for every batch member.
Args:
name: A name for this `Op`.
Returns:
`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.
Raises:
NotImplementedError: If `self.is_square` is `False`.
"""
if self.is_square is False:
raise NotImplementedError(
"Determinant not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name):
return self._determinant()
def _log_abs_determinant(self):
logging.warn(
"Using (possibly slow) default implementation of determinant."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))
return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
_, log_abs_det = linalg.slogdet(self.to_dense())
return log_abs_det
def log_abs_determinant(self, name="log_abs_det"):
"""Log absolute value of determinant for every batch member.
Args:
name: A name for this `Op`.
Returns:
`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.
Raises:
NotImplementedError: If `self.is_square` is `False`.
"""
if self.is_square is False:
raise NotImplementedError(
"Determinant not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name):
return self._log_abs_determinant()
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
"""Default implementation of _solve."""
if self.is_square is False:
raise NotImplementedError(
"Solve is not yet implemented for non-square operators.")
logging.warn(
"Using (possibly slow) default implementation of solve."
" Requires conversion to a dense matrix and O(N^3) operations.")
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
if self._can_use_cholesky():
return linear_operator_util.cholesky_solve_with_broadcast(
linalg_ops.cholesky(self.to_dense()), rhs)
return linear_operator_util.matrix_solve_with_broadcast(
self.to_dense(), rhs, adjoint=adjoint)
def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
"""Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve R > 0 linear systems for every member of the batch.
RHS = ... # shape [..., M, R]
X = operator.solve(RHS)
# X[..., :, r] is the solution to the r'th linear system
# sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]
operator.matmul(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator and compatible shape.
`rhs` is treated like a [batch] matrix meaning for every set of leading
dimensions, the last two dimensions defines a matrix.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
is the hermitian transpose (transposition and complex conjugation).
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
if self.is_non_singular is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"be singular.")
if self.is_square is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name, values=[rhs]):
rhs = ops.convert_to_tensor(rhs, name="rhs")
self._check_input_dtype(rhs)
self_dim = -1 if adjoint else -2
arg_dim = -1 if adjoint_arg else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
rhs.get_shape()[arg_dim])
return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _solvevec(self, rhs, adjoint=False):
"""Default implementation of _solvevec."""
rhs_mat = array_ops.expand_dims(rhs, axis=-1)
solution_mat = self.solve(rhs_mat, adjoint=adjoint)
return array_ops.squeeze(solution_mat, axis=-1)
def solvevec(self, rhs, adjoint=False, name="solve"):
"""Solve single equation with best effort: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve one linear system for every member of the batch.
RHS = ... # shape [..., M]
X = operator.solvevec(RHS)
# X is the solution to the linear system
# sum_j A[..., :, j] X[..., j] = RHS[..., :]
operator.matvec(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator.
`rhs` is treated like a [batch] vector meaning for every set of leading
dimensions, the last dimension defines a vector. See class docstring
for definition of compatibility regarding batch dimensions.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
with self._name_scope(name, values=[rhs]):
rhs = ops.convert_to_tensor(rhs, name="rhs")
self._check_input_dtype(rhs)
self_dim = -1 if adjoint else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
rhs.get_shape()[-1])
return self._solvevec(rhs, adjoint=adjoint)
def cholesky(self, name="cholesky"):
"""Returns a Cholesky factor as a `LinearOperator`.
Given `A` representing this `LinearOperator`, if `A` is positive definite
self-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky
decomposition.
Args:
name: A name for this `Op`.
Returns:
`LinearOperator` which represents the lower triangular matrix
in the Cholesky decomposition.
Raises:
ValueError: When the `LinearOperator` is not hinted to be positive
definite and self adjoint.
"""
if not self._can_use_cholesky():
raise ValueError("Cannot take the Cholesky decomposition: "
"Not a positive definite self adjoint matrix.")
with self._name_scope(name):
return linear_operator_algebra.cholesky(self)
def _to_dense(self):
"""Generic and often inefficient implementation. Override often."""
logging.warn("Using (possibly slow) default implementation of to_dense."
" Converts by self.matmul(identity).")
if self.batch_shape.is_fully_defined():
batch_shape = self.batch_shape
else:
batch_shape = self.batch_shape_tensor()
dim_value = tensor_shape.dimension_value(self.domain_dimension)
if dim_value is not None:
n = dim_value
else:
n = self.domain_dimension_tensor()
eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
return self.matmul(eye)
def to_dense(self, name="to_dense"):
"""Return a dense (batch) matrix representing this operator."""
with self._name_scope(name):
return self._to_dense()
def _diag_part(self):
"""Generic and often inefficient implementation. Override often."""
return array_ops.matrix_diag_part(self.to_dense())
def diag_part(self, name="diag_part"):
"""Efficiently get the [batch] diagonal part of this operator.
If this operator has shape `[B1,...,Bb, M, N]`, this returns a
`Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where
`diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.
```
my_operator = LinearOperatorDiag([1., 2.])
# Efficiently get the diagonal
my_operator.diag_part()
==> [1., 2.]
# Equivalent, but inefficient method
tf.matrix_diag_part(my_operator.to_dense())
==> [1., 2.]
```
Args:
name: A name for this `Op`.
Returns:
diag_part: A `Tensor` of same `dtype` as self.
"""
with self._name_scope(name):
return self._diag_part()
def _trace(self):
return math_ops.reduce_sum(self.diag_part(), axis=-1)
def trace(self, name="trace"):
"""Trace of the linear operator, equal to sum of `self.diag_part()`.
If the operator is square, this is also the sum of the eigenvalues.
Args:
name: A name for this `Op`.
Returns:
Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
"""
with self._name_scope(name):
return self._trace()
def _add_to_tensor(self, x):
# Override if a more efficient implementation is available.
return self.to_dense() + x
def add_to_tensor(self, x, name="add_to_tensor"):
"""Add matrix represented by this operator to `x`. Equivalent to `A + x`.
Args:
x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
return self._add_to_tensor(x)
def _can_use_cholesky(self):
return self.is_self_adjoint and self.is_positive_definite
| apache-2.0 |
thesuperzapper/tensorflow | tensorflow/python/kernel_tests/attention_ops_test.py | 85 | 7316 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image.extract_glimpse()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
class ExtractGlimpseTest(test.TestCase):
def _VerifyValues(self, tensor_in_sizes, glimpse_sizes, offsets,
expected_rows, expected_cols):
"""Verifies the output values of the glimpse extraction kernel.
Args:
tensor_in_sizes: Input tensor dimensions in [input_rows, input_cols].
glimpse_sizes: Dimensions of the glimpse in [glimpse_rows, glimpse_cols].
offsets: Relative location of the center of the glimpse in the input
image expressed as [row_offset, col_offset].
expected_rows: A list containing the expected row numbers (None for
out of bound entries that are expected to be replaced by uniform
random entries in [0, 1)).
expected_cols: Same as expected_rows, but for column numbers.
"""
rows = tensor_in_sizes[0]
cols = tensor_in_sizes[1]
# Row Tensor with entries by row.
# [[ 1 1 1 ... ]
# [ 2 2 2 ... ]
# [ 3 3 3 ... ]
# [ ...
# ]
t_rows = array_ops.tile(
[[1.0 * r] for r in range(1, rows + 1)], [1, cols], name='tile_rows')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_rows_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_rows, 0), 3),
[0, 2, 1, 3])
# Column Tensor with entries by column.
# [[ 1 2 3 4 ... ]
# [ 1 2 3 4 ... ]
# [ 1 2 3 4 ... ]
# [ ... ]
# ]
t_cols = array_ops.tile(
[[1.0 * r for r in range(1, cols + 1)]], [rows, 1], name='tile_cols')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_cols_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_cols, 0), 3),
[0, 2, 1, 3])
# extract_glimpses from Row and Column Tensor, respectively.
# Switch order for glimpse_sizes and offsets to switch from (row, col)
# convention to TensorFlow's (height, width) convention.
t1 = constant_op.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
t2 = constant_op.constant([offsets[1], offsets[0]], shape=[1, 2])
glimpse_rows = (array_ops.transpose(
image_ops.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3]))
glimpse_cols = (array_ops.transpose(
image_ops.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))
# Evaluate the TensorFlow Graph.
with self.test_session() as sess:
value_rows, value_cols = sess.run([glimpse_rows, glimpse_cols])
# Check dimensions of returned glimpse.
self.assertEqual(value_rows.shape[1], glimpse_sizes[0])
self.assertEqual(value_rows.shape[2], glimpse_sizes[1])
self.assertEqual(value_cols.shape[1], glimpse_sizes[0])
self.assertEqual(value_cols.shape[2], glimpse_sizes[1])
# Check entries.
min_random_val = 0
max_random_val = max(rows, cols)
for i in range(glimpse_sizes[0]):
for j in range(glimpse_sizes[1]):
if expected_rows[i] is None or expected_cols[j] is None:
self.assertGreaterEqual(value_rows[0][i][j][0], min_random_val)
self.assertLessEqual(value_rows[0][i][j][0], max_random_val)
self.assertGreaterEqual(value_cols[0][i][j][0], min_random_val)
self.assertLessEqual(value_cols[0][i][j][0], max_random_val)
else:
self.assertEqual(value_rows[0][i][j][0], expected_rows[i])
self.assertEqual(value_cols[0][i][j][0], expected_cols[j])
def testCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.0, 0.0],
expected_rows=[20, 21, 22],
expected_cols=[29, 30, 31, 32, 33])
def testEmptyTensor(self):
empty_image = np.zeros((0, 4, 3, 0))
offsets = np.zeros((0, 2))
with self.test_session():
result = image_ops.extract_glimpse(empty_image, [1, 1], offsets)
self.assertAllEqual(
np.zeros(
(0, 1, 1, 0), dtype=np.float32), result.eval())
def testLargeCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[41, 61],
offsets=[0.0, 0.0],
expected_rows=list(range(1, 42)),
expected_cols=list(range(1, 62)))
def testTooLargeCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[43, 63],
offsets=[0.0, 0.0],
expected_rows=[None] + list(range(1, 42)) + [None],
expected_cols=[None] + list(range(1, 62)) + [None])
def testGlimpseFullOverlap(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.1, 0.3],
expected_rows=[22, 23, 24],
expected_cols=[38, 39, 40, 41, 42])
def testGlimpseFullOverlap2(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 3],
offsets=[-0.7, -0.7],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[8, 9, 10])
def testGlimpseBeforeLeftMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 5],
offsets=[-0.7, -0.9],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[1, 2, 3, 4, 5])
def testGlimpseLowerRightCorner(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[1.0, 1.0],
expected_rows=[38, 39, 40, 41, None, None, None],
expected_cols=[59, 60, 61, None, None])
def testGlimpseNoOverlap(self):
self._VerifyValues(
tensor_in_sizes=[20, 30],
glimpse_sizes=[3, 3],
offsets=[-2.0, 2.0],
expected_rows=[None, None, None],
expected_cols=[None, None, None])
def testGlimpseOnLeftMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 7],
offsets=[-0.7, -1.0],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[None, None, None, 1, 2, 3, 4])
def testGlimpseUpperMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[-1, 0.9],
expected_rows=[None, None, None, 1, 2, 3, 4],
expected_cols=[56, 57, 58, 59, 60])
if __name__ == '__main__':
test.main()
| apache-2.0 |
staslev/incubator-beam | sdks/python/apache_beam/runners/dataflow/internal/clients/dataflow/dataflow_v1b3_messages.py | 5 | 195799 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generated message classes for dataflow version v1b3.
Develops and executes data processing patterns like ETL, batch computation,
and continuous computation.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'dataflow'
class ApproximateProgress(_messages.Message):
"""Obsolete in favor of ApproximateReportedProgress and
ApproximateSplitRequest.
Fields:
percentComplete: Obsolete.
position: Obsolete.
remainingTime: Obsolete.
"""
percentComplete = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
position = _messages.MessageField('Position', 2)
remainingTime = _messages.StringField(3)
class ApproximateReportedProgress(_messages.Message):
"""A progress measurement of a WorkItem by a worker.
Fields:
consumedParallelism: Total amount of parallelism in the portion of input
of this task that has already been consumed and is no longer active. In
the first two examples above (see remaining_parallelism), the value
should be 29 or 2 respectively. The sum of remaining_parallelism and
consumed_parallelism should equal the total amount of parallelism in
this work item. If specified, must be finite.
fractionConsumed: Completion as fraction of the input consumed, from 0.0
(beginning, nothing consumed), to 1.0 (end of the input, entire input
consumed).
position: A Position within the work to represent a progress.
remainingParallelism: Total amount of parallelism in the input of this
task that remains, (i.e. can be delegated to this task and any new tasks
via dynamic splitting). Always at least 1 for non-finished work items
and 0 for finished. "Amount of parallelism" refers to how many non-
empty parts of the input can be read in parallel. This does not
necessarily equal number of records. An input that can be read in
parallel down to the individual records is called "perfectly
splittable". An example of non-perfectly parallelizable input is a
block-compressed file format where a block of records has to be read as
a whole, but different blocks can be read in parallel. Examples: * If
we are processing record #30 (starting at 1) out of 50 in a perfectly
splittable 50-record input, this value should be 21 (20 remaining + 1
current). * If we are reading through block 3 in a block-compressed file
consisting of 5 blocks, this value should be 3 (since blocks 4 and 5
can be processed in parallel by new tasks via dynamic splitting and
the current task remains processing block 3). * If we are reading
through the last block in a block-compressed file, or reading or
processing the last record in a perfectly splittable input, this value
should be 1, because apart from the current task, no additional
remainder can be split off.
"""
consumedParallelism = _messages.MessageField('ReportedParallelism', 1)
fractionConsumed = _messages.FloatField(2)
position = _messages.MessageField('Position', 3)
remainingParallelism = _messages.MessageField('ReportedParallelism', 4)
class ApproximateSplitRequest(_messages.Message):
"""A suggestion by the service to the worker to dynamically split the
WorkItem.
Fields:
fractionConsumed: A fraction at which to split the work item, from 0.0
(beginning of the input) to 1.0 (end of the input).
position: A Position at which to split the work item.
"""
fractionConsumed = _messages.FloatField(1)
position = _messages.MessageField('Position', 2)
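# Illustrative sketch (not part of the generated file): the classes in this
# module are plain protorpclite messages, so they are constructed with
# keyword arguments. For example, a split request at the halfway point of
# the input could be built as
#
#   split = ApproximateSplitRequest(fractionConsumed=0.5)
#
# and serialized with the usual apitools/protorpc encoders.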
class AutoscalingEvent(_messages.Message):
"""A structured message reporting an autoscaling decision made by the
Dataflow service.
Enums:
EventTypeValueValuesEnum: The type of autoscaling event to report.
Fields:
currentNumWorkers: The current number of workers the job has.
description: A message describing why the system decided to adjust the
current number of workers, why it failed, or why the system decided to
not make any changes to the number of workers.
eventType: The type of autoscaling event to report.
targetNumWorkers: The target number of workers the worker pool wants to
resize to use.
time: The time this event was emitted to indicate a new target or current
num_workers value.
"""
class EventTypeValueValuesEnum(_messages.Enum):
"""The type of autoscaling event to report.
Values:
TYPE_UNKNOWN: Default type for the enum. Value should never be
returned.
TARGET_NUM_WORKERS_CHANGED: The TARGET_NUM_WORKERS_CHANGED type should
be used when the target worker pool size has changed at the start of
an actuation. An event should always be specified as
TARGET_NUM_WORKERS_CHANGED if it reflects a change in the
target_num_workers.
CURRENT_NUM_WORKERS_CHANGED: The CURRENT_NUM_WORKERS_CHANGED type should
be used when actual worker pool size has been changed, but the
target_num_workers has not changed.
ACTUATION_FAILURE: The ACTUATION_FAILURE type should be used when we
want to report an error to the user indicating why the current number
of workers in the pool could not be changed. Displayed in the current
status and history widgets.
NO_CHANGE: Used when we want to report to the user a reason why we are
not currently adjusting the number of workers. Should specify both
target_num_workers, current_num_workers and a decision_message.
"""
TYPE_UNKNOWN = 0
TARGET_NUM_WORKERS_CHANGED = 1
CURRENT_NUM_WORKERS_CHANGED = 2
ACTUATION_FAILURE = 3
NO_CHANGE = 4
currentNumWorkers = _messages.IntegerField(1)
description = _messages.MessageField('StructuredMessage', 2)
eventType = _messages.EnumField('EventTypeValueValuesEnum', 3)
targetNumWorkers = _messages.IntegerField(4)
time = _messages.StringField(5)
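# Illustrative sketch (hypothetical values): enum-typed fields are set from
# the nested enum classes defined above, e.g.
#
#   event = AutoscalingEvent(
#       currentNumWorkers=3,
#       targetNumWorkers=10,
#       eventType=(AutoscalingEvent.EventTypeValueValuesEnum
#                  .TARGET_NUM_WORKERS_CHANGED))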
class AutoscalingSettings(_messages.Message):
"""Settings for WorkerPool autoscaling.
Enums:
AlgorithmValueValuesEnum: The algorithm to use for autoscaling.
Fields:
algorithm: The algorithm to use for autoscaling.
maxNumWorkers: The maximum number of workers to cap scaling at.
"""
class AlgorithmValueValuesEnum(_messages.Enum):
"""The algorithm to use for autoscaling.
Values:
AUTOSCALING_ALGORITHM_UNKNOWN: The algorithm is unknown, or unspecified.
AUTOSCALING_ALGORITHM_NONE: Disable autoscaling.
AUTOSCALING_ALGORITHM_BASIC: Increase worker count over time to reduce
job execution time.
"""
AUTOSCALING_ALGORITHM_UNKNOWN = 0
AUTOSCALING_ALGORITHM_NONE = 1
AUTOSCALING_ALGORITHM_BASIC = 2
algorithm = _messages.EnumField('AlgorithmValueValuesEnum', 1)
maxNumWorkers = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class CPUTime(_messages.Message):
"""Modeled after information exposed by /proc/stat.
Fields:
rate: Average CPU utilization rate (% non-idle cpu / second) since
previous sample.
timestamp: Timestamp of the measurement.
totalMs: Total active CPU time across all cores (ie., non-idle) in
milliseconds since start-up.
"""
rate = _messages.FloatField(1)
timestamp = _messages.StringField(2)
totalMs = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
class ComponentSource(_messages.Message):
"""Description of an interstitial value between transforms in an execution
stage.
Fields:
name: Dataflow service generated name for this source.
originalTransformOrCollection: User name for the original user transform
or collection with which this source is most closely associated.
userName: Human-readable name for this transform; may be user or system
generated.
"""
name = _messages.StringField(1)
originalTransformOrCollection = _messages.StringField(2)
userName = _messages.StringField(3)
class ComponentTransform(_messages.Message):
"""Description of a transform executed as part of an execution stage.
Fields:
name: Dataflow service generated name for this source.
originalTransform: User name for the original user transform with which
this transform is most closely associated.
userName: Human-readable name for this transform; may be user or system
generated.
"""
name = _messages.StringField(1)
originalTransform = _messages.StringField(2)
userName = _messages.StringField(3)
class ComputationTopology(_messages.Message):
"""All configuration data for a particular Computation.
Fields:
computationId: The ID of the computation.
inputs: The inputs to the computation.
keyRanges: The key ranges processed by the computation.
outputs: The outputs from the computation.
stateFamilies: The state family values.
systemStageName: The system stage name.
"""
computationId = _messages.StringField(1)
inputs = _messages.MessageField('StreamLocation', 2, repeated=True)
keyRanges = _messages.MessageField('KeyRangeLocation', 3, repeated=True)
outputs = _messages.MessageField('StreamLocation', 4, repeated=True)
stateFamilies = _messages.MessageField('StateFamilyConfig', 5, repeated=True)
systemStageName = _messages.StringField(6)
class ConcatPosition(_messages.Message):
"""A position that encapsulates an inner position and an index for the inner
position. A ConcatPosition can be used by a reader of a source that
encapsulates a set of other sources.
Fields:
index: Index of the inner source.
position: Position within the inner source.
"""
index = _messages.IntegerField(1, variant=_messages.Variant.INT32)
position = _messages.MessageField('Position', 2)
class CounterMetadata(_messages.Message):
"""CounterMetadata includes all static non-name non-value counter
attributes.
Enums:
KindValueValuesEnum: Counter aggregation kind.
StandardUnitsValueValuesEnum: System defined Units, see above enum.
Fields:
description: Human-readable description of the counter semantics.
kind: Counter aggregation kind.
otherUnits: A string referring to the unit type.
standardUnits: System defined Units, see above enum.
"""
class KindValueValuesEnum(_messages.Enum):
"""Counter aggregation kind.
Values:
INVALID: Counter aggregation kind was not set.
SUM: Aggregated value is the sum of all contributed values.
MAX: Aggregated value is the max of all contributed values.
MIN: Aggregated value is the min of all contributed values.
MEAN: Aggregated value is the mean of all contributed values.
OR: Aggregated value represents the logical 'or' of all contributed
values.
AND: Aggregated value represents the logical 'and' of all contributed
values.
SET: Aggregated value is a set of unique contributed values.
DISTRIBUTION: Aggregated value captures statistics about a distribution.
"""
INVALID = 0
SUM = 1
MAX = 2
MIN = 3
MEAN = 4
OR = 5
AND = 6
SET = 7
DISTRIBUTION = 8
class StandardUnitsValueValuesEnum(_messages.Enum):
"""System defined Units, see above enum.
Values:
BYTES: Counter returns a value in bytes.
BYTES_PER_SEC: Counter returns a value in bytes per second.
MILLISECONDS: Counter returns a value in milliseconds.
MICROSECONDS: Counter returns a value in microseconds.
NANOSECONDS: Counter returns a value in nanoseconds.
TIMESTAMP_MSEC: Counter returns a timestamp in milliseconds.
TIMESTAMP_USEC: Counter returns a timestamp in microseconds.
TIMESTAMP_NSEC: Counter returns a timestamp in nanoseconds.
"""
BYTES = 0
BYTES_PER_SEC = 1
MILLISECONDS = 2
MICROSECONDS = 3
NANOSECONDS = 4
TIMESTAMP_MSEC = 5
TIMESTAMP_USEC = 6
TIMESTAMP_NSEC = 7
description = _messages.StringField(1)
kind = _messages.EnumField('KindValueValuesEnum', 2)
otherUnits = _messages.StringField(3)
standardUnits = _messages.EnumField('StandardUnitsValueValuesEnum', 4)
class CounterStructuredName(_messages.Message):
"""Identifies a counter within a per-job namespace. Counters whose
structured names are the same get merged into a single value for the job.
Enums:
OriginValueValuesEnum: One of the standard Origins defined above.
PortionValueValuesEnum: Portion of this counter, either key or value.
Fields:
componentStepName: Name of the optimized step being executed by the
workers.
executionStepName: Name of the stage. An execution step contains multiple
component steps.
name: Counter name. Not necessarily globally-unique, but unique within the
context of the other fields. Required.
origin: One of the standard Origins defined above.
originNamespace: A string containing a more specific namespace of the
counter's origin.
originalStepName: System generated name of the original step in the user's
graph, before optimization.
portion: Portion of this counter, either key or value.
workerId: ID of a particular worker.
"""
class OriginValueValuesEnum(_messages.Enum):
"""One of the standard Origins defined above.
Values:
SYSTEM: Counter was created by the Dataflow system.
USER: Counter was created by the user.
"""
SYSTEM = 0
USER = 1
class PortionValueValuesEnum(_messages.Enum):
"""Portion of this counter, either key or value.
Values:
ALL: Counter portion has not been set.
KEY: Counter reports a key.
VALUE: Counter reports a value.
"""
ALL = 0
KEY = 1
VALUE = 2
componentStepName = _messages.StringField(1)
executionStepName = _messages.StringField(2)
name = _messages.StringField(3)
origin = _messages.EnumField('OriginValueValuesEnum', 4)
originNamespace = _messages.StringField(5)
originalStepName = _messages.StringField(6)
portion = _messages.EnumField('PortionValueValuesEnum', 7)
workerId = _messages.StringField(8)
class CounterStructuredNameAndMetadata(_messages.Message):
"""A single message which encapsulates structured name and metadata for a
given counter.
Fields:
metadata: Metadata associated with a counter
name: Structured name of the counter.
"""
metadata = _messages.MessageField('CounterMetadata', 1)
name = _messages.MessageField('CounterStructuredName', 2)
class CounterUpdate(_messages.Message):
"""An update to a Counter sent from a worker.
Fields:
boolean: Boolean value for And, Or.
cumulative: True if this counter is reported as the total cumulative
aggregate value accumulated since the worker started working on this
WorkItem. By default this is false, indicating that this counter is
reported as a delta.
distribution: Distribution data
floatingPoint: Floating point value for Sum, Max, Min.
floatingPointList: List of floating point numbers, for Set.
floatingPointMean: Floating point mean aggregation value for Mean.
integer: Integer value for Sum, Max, Min.
integerList: List of integers, for Set.
integerMean: Integer mean aggregation value for Mean.
internal: Value for internally-defined counters used by the Dataflow
service.
nameAndKind: Counter name and aggregation type.
shortId: The service-generated short identifier for this counter. The
short_id -> (name, metadata) mapping is constant for the lifetime of a
job.
stringList: List of strings, for Set.
structuredNameAndMetadata: Counter structured name and metadata.
"""
boolean = _messages.BooleanField(1)
cumulative = _messages.BooleanField(2)
distribution = _messages.MessageField('DistributionUpdate', 3)
floatingPoint = _messages.FloatField(4)
floatingPointList = _messages.MessageField('FloatingPointList', 5)
floatingPointMean = _messages.MessageField('FloatingPointMean', 6)
integer = _messages.MessageField('SplitInt64', 7)
integerList = _messages.MessageField('IntegerList', 8)
integerMean = _messages.MessageField('IntegerMean', 9)
internal = _messages.MessageField('extra_types.JsonValue', 10)
nameAndKind = _messages.MessageField('NameAndKind', 11)
shortId = _messages.IntegerField(12)
stringList = _messages.MessageField('StringList', 13)
structuredNameAndMetadata = _messages.MessageField('CounterStructuredNameAndMetadata', 14)
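# Illustrative sketch (hypothetical values): only the fields relevant to a
# counter's kind are populated, e.g. a cumulative boolean ("Or") counter
# identified by its service-assigned short id:
#
#   update = CounterUpdate(cumulative=True, boolean=True, shortId=12345)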
class CreateJobFromTemplateRequest(_messages.Message):
"""A request to create a Cloud Dataflow job from a template.
Messages:
ParametersValue: The runtime parameters to pass to the job.
Fields:
environment: The runtime environment for the job.
gcsPath: Required. A Cloud Storage path to the template from which to
create the job. Must be a valid Cloud Storage URL, beginning with
`gs://`.
jobName: Required. The job name to use for the created job.
location: The location to which to direct the request.
parameters: The runtime parameters to pass to the job.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParametersValue(_messages.Message):
"""The runtime parameters to pass to the job.
Messages:
AdditionalProperty: An additional property for a ParametersValue object.
Fields:
additionalProperties: Additional properties of type ParametersValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ParametersValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
environment = _messages.MessageField('RuntimeEnvironment', 1)
gcsPath = _messages.StringField(2)
jobName = _messages.StringField(3)
location = _messages.StringField(4)
parameters = _messages.MessageField('ParametersValue', 5)
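# Illustrative sketch (hypothetical paths and parameter names): the
# ParametersValue map is expressed as a list of AdditionalProperty entries,
# e.g.
#
#   params = CreateJobFromTemplateRequest.ParametersValue(additionalProperties=[
#       CreateJobFromTemplateRequest.ParametersValue.AdditionalProperty(
#           key='inputFile', value='gs://my-bucket/input.txt')])
#   request = CreateJobFromTemplateRequest(
#       gcsPath='gs://my-bucket/templates/wordcount',
#       jobName='wordcount-job',
#       parameters=params)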
class CustomSourceLocation(_messages.Message):
"""Identifies the location of a custom souce.
Fields:
stateful: Whether this source is stateful.
"""
stateful = _messages.BooleanField(1)
class DataDiskAssignment(_messages.Message):
"""Data disk assignment for a given VM instance.
Fields:
dataDisks: Mounted data disks. The order is important: a data disk's
0-based index in this list defines which persistent directory the disk
is mounted to, for example the list of {
"myproject-1014-104817-4c2-harness-0-disk-0" }, {
"myproject-1014-104817-4c2-harness-0-disk-1" }.
vmInstance: VM instance name the data disks are mounted to, for example
"myproject-1014-104817-4c2-harness-0".
"""
dataDisks = _messages.StringField(1, repeated=True)
vmInstance = _messages.StringField(2)
class DataflowProjectsJobsAggregatedRequest(_messages.Message):
"""A DataflowProjectsJobsAggregatedRequest object.
Enums:
FilterValueValuesEnum: The kind of filter to use.
ViewValueValuesEnum: Level of information requested in response. Default
is `JOB_VIEW_SUMMARY`.
Fields:
filter: The kind of filter to use.
location: The location that contains this job.
pageSize: If there are many jobs, limit response to at most this many. The
actual number of jobs returned will be the lesser of max_responses and
an unspecified server-defined limit.
pageToken: Set this to the 'next_page_token' field of a previous response
to request additional results in a long list.
projectId: The project which owns the jobs.
view: Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
"""
class FilterValueValuesEnum(_messages.Enum):
"""The kind of filter to use.
Values:
UNKNOWN: <no description>
ALL: <no description>
TERMINATED: <no description>
ACTIVE: <no description>
"""
UNKNOWN = 0
ALL = 1
TERMINATED = 2
ACTIVE = 3
class ViewValueValuesEnum(_messages.Enum):
"""Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
filter = _messages.EnumField('FilterValueValuesEnum', 1)
location = _messages.StringField(2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 6)
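# Illustrative sketch (hypothetical project id): listing only active jobs,
# 50 at a time, with the summary view:
#
#   req = DataflowProjectsJobsAggregatedRequest(
#       projectId='my-project',
#       filter=DataflowProjectsJobsAggregatedRequest.FilterValueValuesEnum.ACTIVE,
#       pageSize=50,
#       view=(DataflowProjectsJobsAggregatedRequest.ViewValueValuesEnum
#             .JOB_VIEW_SUMMARY))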
class DataflowProjectsJobsCreateRequest(_messages.Message):
"""A DataflowProjectsJobsCreateRequest object.
Enums:
ViewValueValuesEnum: The level of information requested in response.
Fields:
job: A Job resource to be passed as the request body.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
replaceJobId: Deprecated. This field is now in the Job message.
view: The level of information requested in response.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The level of information requested in response.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
job = _messages.MessageField('Job', 1)
location = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
replaceJobId = _messages.StringField(4)
view = _messages.EnumField('ViewValueValuesEnum', 5)
class DataflowProjectsJobsDebugGetConfigRequest(_messages.Message):
"""A DataflowProjectsJobsDebugGetConfigRequest object.
Fields:
getDebugConfigRequest: A GetDebugConfigRequest resource to be passed as
the request body.
jobId: The job id.
projectId: The project id.
"""
getDebugConfigRequest = _messages.MessageField('GetDebugConfigRequest', 1)
jobId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class DataflowProjectsJobsDebugSendCaptureRequest(_messages.Message):
"""A DataflowProjectsJobsDebugSendCaptureRequest object.
Fields:
jobId: The job id.
projectId: The project id.
sendDebugCaptureRequest: A SendDebugCaptureRequest resource to be passed
as the request body.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
sendDebugCaptureRequest = _messages.MessageField('SendDebugCaptureRequest', 3)
class DataflowProjectsJobsGetMetricsRequest(_messages.Message):
"""A DataflowProjectsJobsGetMetricsRequest object.
Fields:
jobId: The job to get messages for.
location: The location which contains the job specified by job_id.
projectId: A project id.
startTime: Return only metric data that has changed since this time.
Default is to return all information about all metrics for the job.
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
startTime = _messages.StringField(4)
class DataflowProjectsJobsGetRequest(_messages.Message):
"""A DataflowProjectsJobsGetRequest object.
Enums:
ViewValueValuesEnum: The level of information requested in response.
Fields:
jobId: The job ID.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
view: The level of information requested in response.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The level of information requested in response.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsJobsListRequest(_messages.Message):
"""A DataflowProjectsJobsListRequest object.
Enums:
FilterValueValuesEnum: The kind of filter to use.
ViewValueValuesEnum: Level of information requested in response. Default
is `JOB_VIEW_SUMMARY`.
Fields:
filter: The kind of filter to use.
location: The location that contains this job.
pageSize: If there are many jobs, limit response to at most this many. The
actual number of jobs returned will be the lesser of max_responses and
an unspecified server-defined limit.
pageToken: Set this to the 'next_page_token' field of a previous response
to request additional results in a long list.
projectId: The project which owns the jobs.
view: Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
"""
class FilterValueValuesEnum(_messages.Enum):
"""The kind of filter to use.
Values:
UNKNOWN: <no description>
ALL: <no description>
TERMINATED: <no description>
ACTIVE: <no description>
"""
UNKNOWN = 0
ALL = 1
TERMINATED = 2
ACTIVE = 3
class ViewValueValuesEnum(_messages.Enum):
"""Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
filter = _messages.EnumField('FilterValueValuesEnum', 1)
location = _messages.StringField(2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 6)
class DataflowProjectsJobsMessagesListRequest(_messages.Message):
"""A DataflowProjectsJobsMessagesListRequest object.
Enums:
MinimumImportanceValueValuesEnum: Filter to only get messages with
importance >= level
Fields:
endTime: Return only messages with timestamps < end_time. The default is
now (i.e. return up to the latest messages available).
jobId: The job to get messages about.
location: The location which contains the job specified by job_id.
minimumImportance: Filter to only get messages with importance >= level
pageSize: If specified, determines the maximum number of messages to
return. If unspecified, the service may choose an appropriate default,
or may return an arbitrarily large number of results.
pageToken: If supplied, this should be the value of next_page_token
returned by an earlier call. This will cause the next page of results to
be returned.
projectId: A project id.
startTime: If specified, return only messages with timestamps >=
start_time. The default is the job creation time (i.e. beginning of
messages).
"""
class MinimumImportanceValueValuesEnum(_messages.Enum):
"""Filter to only get messages with importance >= level
Values:
JOB_MESSAGE_IMPORTANCE_UNKNOWN: <no description>
JOB_MESSAGE_DEBUG: <no description>
JOB_MESSAGE_DETAILED: <no description>
JOB_MESSAGE_BASIC: <no description>
JOB_MESSAGE_WARNING: <no description>
JOB_MESSAGE_ERROR: <no description>
"""
JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
JOB_MESSAGE_DEBUG = 1
JOB_MESSAGE_DETAILED = 2
JOB_MESSAGE_BASIC = 3
JOB_MESSAGE_WARNING = 4
JOB_MESSAGE_ERROR = 5
endTime = _messages.StringField(1)
jobId = _messages.StringField(2, required=True)
location = _messages.StringField(3)
minimumImportance = _messages.EnumField('MinimumImportanceValueValuesEnum', 4)
pageSize = _messages.IntegerField(5, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(6)
projectId = _messages.StringField(7, required=True)
startTime = _messages.StringField(8)
class DataflowProjectsJobsUpdateRequest(_messages.Message):
"""A DataflowProjectsJobsUpdateRequest object.
Fields:
job: A Job resource to be passed as the request body.
jobId: The job ID.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
"""
job = _messages.MessageField('Job', 1)
jobId = _messages.StringField(2, required=True)
location = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
class DataflowProjectsJobsWorkItemsLeaseRequest(_messages.Message):
"""A DataflowProjectsJobsWorkItemsLeaseRequest object.
Fields:
jobId: Identifies the workflow job this worker belongs to.
leaseWorkItemRequest: A LeaseWorkItemRequest resource to be passed as the
request body.
projectId: Identifies the project this worker belongs to.
"""
jobId = _messages.StringField(1, required=True)
leaseWorkItemRequest = _messages.MessageField('LeaseWorkItemRequest', 2)
projectId = _messages.StringField(3, required=True)
class DataflowProjectsJobsWorkItemsReportStatusRequest(_messages.Message):
"""A DataflowProjectsJobsWorkItemsReportStatusRequest object.
Fields:
jobId: The job which the WorkItem is part of.
projectId: The project which owns the WorkItem's job.
reportWorkItemStatusRequest: A ReportWorkItemStatusRequest resource to be
passed as the request body.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
reportWorkItemStatusRequest = _messages.MessageField('ReportWorkItemStatusRequest', 3)
class DataflowProjectsLocationsJobsCreateRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsCreateRequest object.
Enums:
ViewValueValuesEnum: The level of information requested in response.
Fields:
job: A Job resource to be passed as the request body.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
replaceJobId: Deprecated. This field is now in the Job message.
view: The level of information requested in response.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The level of information requested in response.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
job = _messages.MessageField('Job', 1)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
replaceJobId = _messages.StringField(4)
view = _messages.EnumField('ViewValueValuesEnum', 5)
class DataflowProjectsLocationsJobsDebugGetConfigRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsDebugGetConfigRequest object.
Fields:
getDebugConfigRequest: A GetDebugConfigRequest resource to be passed as
the request body.
jobId: The job id.
location: The location which contains the job specified by job_id.
projectId: The project id.
"""
getDebugConfigRequest = _messages.MessageField('GetDebugConfigRequest', 1)
jobId = _messages.StringField(2, required=True)
location = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
class DataflowProjectsLocationsJobsDebugSendCaptureRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsDebugSendCaptureRequest object.
Fields:
jobId: The job id.
location: The location which contains the job specified by job_id.
projectId: The project id.
sendDebugCaptureRequest: A SendDebugCaptureRequest resource to be passed
as the request body.
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
sendDebugCaptureRequest = _messages.MessageField('SendDebugCaptureRequest', 4)
class DataflowProjectsLocationsJobsGetMetricsRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsGetMetricsRequest object.
Fields:
jobId: The job to get messages for.
location: The location which contains the job specified by job_id.
projectId: A project id.
startTime: Return only metric data that has changed since this time.
Default is to return all information about all metrics for the job.
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
startTime = _messages.StringField(4)
class DataflowProjectsLocationsJobsGetRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsGetRequest object.
Enums:
ViewValueValuesEnum: The level of information requested in response.
Fields:
jobId: The job ID.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
view: The level of information requested in response.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The level of information requested in response.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsLocationsJobsListRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsListRequest object.
Enums:
FilterValueValuesEnum: The kind of filter to use.
ViewValueValuesEnum: Level of information requested in response. Default
is `JOB_VIEW_SUMMARY`.
Fields:
filter: The kind of filter to use.
location: The location that contains this job.
pageSize: If there are many jobs, limit response to at most this many. The
actual number of jobs returned will be the lesser of max_responses and
an unspecified server-defined limit.
pageToken: Set this to the 'next_page_token' field of a previous response
to request additional results in a long list.
projectId: The project which owns the jobs.
view: Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
"""
class FilterValueValuesEnum(_messages.Enum):
"""The kind of filter to use.
Values:
UNKNOWN: <no description>
ALL: <no description>
TERMINATED: <no description>
ACTIVE: <no description>
"""
UNKNOWN = 0
ALL = 1
TERMINATED = 2
ACTIVE = 3
class ViewValueValuesEnum(_messages.Enum):
"""Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
filter = _messages.EnumField('FilterValueValuesEnum', 1)
location = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 6)
class DataflowProjectsLocationsJobsMessagesListRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsMessagesListRequest object.
Enums:
MinimumImportanceValueValuesEnum: Filter to only get messages with
importance >= level
Fields:
endTime: Return only messages with timestamps < end_time. The default is
now (i.e. return up to the latest messages available).
jobId: The job to get messages about.
location: The location which contains the job specified by job_id.
minimumImportance: Filter to only get messages with importance >= level
pageSize: If specified, determines the maximum number of messages to
return. If unspecified, the service may choose an appropriate default,
or may return an arbitrarily large number of results.
pageToken: If supplied, this should be the value of next_page_token
returned by an earlier call. This will cause the next page of results to
be returned.
projectId: A project id.
startTime: If specified, return only messages with timestamps >=
start_time. The default is the job creation time (i.e. beginning of
messages).
"""
class MinimumImportanceValueValuesEnum(_messages.Enum):
"""Filter to only get messages with importance >= level
Values:
JOB_MESSAGE_IMPORTANCE_UNKNOWN: <no description>
JOB_MESSAGE_DEBUG: <no description>
JOB_MESSAGE_DETAILED: <no description>
JOB_MESSAGE_BASIC: <no description>
JOB_MESSAGE_WARNING: <no description>
JOB_MESSAGE_ERROR: <no description>
"""
JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
JOB_MESSAGE_DEBUG = 1
JOB_MESSAGE_DETAILED = 2
JOB_MESSAGE_BASIC = 3
JOB_MESSAGE_WARNING = 4
JOB_MESSAGE_ERROR = 5
endTime = _messages.StringField(1)
jobId = _messages.StringField(2, required=True)
location = _messages.StringField(3, required=True)
minimumImportance = _messages.EnumField('MinimumImportanceValueValuesEnum', 4)
pageSize = _messages.IntegerField(5, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(6)
projectId = _messages.StringField(7, required=True)
startTime = _messages.StringField(8)
class DataflowProjectsLocationsJobsUpdateRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsUpdateRequest object.
Fields:
job: A Job resource to be passed as the request body.
jobId: The job ID.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
"""
job = _messages.MessageField('Job', 1)
jobId = _messages.StringField(2, required=True)
location = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
class DataflowProjectsLocationsJobsWorkItemsLeaseRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsWorkItemsLeaseRequest object.
Fields:
jobId: Identifies the workflow job this worker belongs to.
leaseWorkItemRequest: A LeaseWorkItemRequest resource to be passed as the
request body.
location: The location which contains the WorkItem's job.
projectId: Identifies the project this worker belongs to.
"""
jobId = _messages.StringField(1, required=True)
leaseWorkItemRequest = _messages.MessageField('LeaseWorkItemRequest', 2)
location = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
class DataflowProjectsLocationsJobsWorkItemsReportStatusRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsWorkItemsReportStatusRequest object.
Fields:
jobId: The job which the WorkItem is part of.
location: The location which contains the WorkItem's job.
projectId: The project which owns the WorkItem's job.
reportWorkItemStatusRequest: A ReportWorkItemStatusRequest resource to be
passed as the request body.
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
reportWorkItemStatusRequest = _messages.MessageField('ReportWorkItemStatusRequest', 4)
class DataflowProjectsLocationsTemplatesCreateRequest(_messages.Message):
"""A DataflowProjectsLocationsTemplatesCreateRequest object.
Fields:
createJobFromTemplateRequest: A CreateJobFromTemplateRequest resource to
be passed as the request body.
location: The location to which to direct the request.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
"""
createJobFromTemplateRequest = _messages.MessageField('CreateJobFromTemplateRequest', 1)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class DataflowProjectsLocationsTemplatesGetRequest(_messages.Message):
"""A DataflowProjectsLocationsTemplatesGetRequest object.
Enums:
ViewValueValuesEnum: The view to retrieve. Defaults to METADATA_ONLY.
Fields:
gcsPath: Required. A Cloud Storage path to the template from which to
create the job. Must be a valid Cloud Storage URL, beginning with
`gs://`.
location: The location to which to direct the request.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
view: The view to retrieve. Defaults to METADATA_ONLY.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The view to retrieve. Defaults to METADATA_ONLY.
Values:
METADATA_ONLY: <no description>
"""
METADATA_ONLY = 0
gcsPath = _messages.StringField(1)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsLocationsTemplatesLaunchRequest(_messages.Message):
"""A DataflowProjectsLocationsTemplatesLaunchRequest object.
Fields:
gcsPath: Required. A Cloud Storage path to the template from which to
create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
launchTemplateParameters: A LaunchTemplateParameters resource to be passed
as the request body.
location: The location to which to direct the request.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
validateOnly: If true, the request is validated but not actually executed.
Defaults to false.
"""
gcsPath = _messages.StringField(1)
launchTemplateParameters = _messages.MessageField('LaunchTemplateParameters', 2)
location = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
validateOnly = _messages.BooleanField(5)
class DataflowProjectsLocationsWorkerMessagesRequest(_messages.Message):
"""A DataflowProjectsLocationsWorkerMessagesRequest object.
Fields:
location: The location which contains the job
projectId: The project to send the WorkerMessages to.
sendWorkerMessagesRequest: A SendWorkerMessagesRequest resource to be
passed as the request body.
"""
location = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
sendWorkerMessagesRequest = _messages.MessageField('SendWorkerMessagesRequest', 3)
class DataflowProjectsTemplatesCreateRequest(_messages.Message):
"""A DataflowProjectsTemplatesCreateRequest object.
Fields:
createJobFromTemplateRequest: A CreateJobFromTemplateRequest resource to
be passed as the request body.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
"""
createJobFromTemplateRequest = _messages.MessageField('CreateJobFromTemplateRequest', 1)
projectId = _messages.StringField(2, required=True)
class DataflowProjectsTemplatesGetRequest(_messages.Message):
"""A DataflowProjectsTemplatesGetRequest object.
Enums:
ViewValueValuesEnum: The view to retrieve. Defaults to METADATA_ONLY.
Fields:
gcsPath: Required. A Cloud Storage path to the template from which to
create the job. Must be a valid Cloud Storage URL, beginning with
`gs://`.
location: The location to which to direct the request.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
view: The view to retrieve. Defaults to METADATA_ONLY.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The view to retrieve. Defaults to METADATA_ONLY.
Values:
METADATA_ONLY: <no description>
"""
METADATA_ONLY = 0
gcsPath = _messages.StringField(1)
location = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsTemplatesLaunchRequest(_messages.Message):
"""A DataflowProjectsTemplatesLaunchRequest object.
Fields:
gcsPath: Required. A Cloud Storage path to the template from which to
create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
launchTemplateParameters: A LaunchTemplateParameters resource to be passed
as the request body.
location: The location to which to direct the request.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
validateOnly: If true, the request is validated but not actually executed.
Defaults to false.
"""
gcsPath = _messages.StringField(1)
launchTemplateParameters = _messages.MessageField('LaunchTemplateParameters', 2)
location = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
validateOnly = _messages.BooleanField(5)
class DataflowProjectsWorkerMessagesRequest(_messages.Message):
"""A DataflowProjectsWorkerMessagesRequest object.
Fields:
projectId: The project to send the WorkerMessages to.
sendWorkerMessagesRequest: A SendWorkerMessagesRequest resource to be
passed as the request body.
"""
projectId = _messages.StringField(1, required=True)
sendWorkerMessagesRequest = _messages.MessageField('SendWorkerMessagesRequest', 2)
class DerivedSource(_messages.Message):
"""Specification of one of the bundles produced as a result of splitting a
Source (e.g. when executing a SourceSplitRequest, or when splitting an
active task using WorkItemStatus.dynamic_source_split), relative to the
source being split.
Enums:
DerivationModeValueValuesEnum: What source to base the produced source on
(if any).
Fields:
derivationMode: What source to base the produced source on (if any).
source: Specification of the source.
"""
class DerivationModeValueValuesEnum(_messages.Enum):
"""What source to base the produced source on (if any).
Values:
SOURCE_DERIVATION_MODE_UNKNOWN: The source derivation is unknown, or
unspecified.
SOURCE_DERIVATION_MODE_INDEPENDENT: Produce a completely independent
Source with no base.
SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT: Produce a Source based on the
Source being split.
SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT: Produce a Source based on the
base of the Source being split.
"""
SOURCE_DERIVATION_MODE_UNKNOWN = 0
SOURCE_DERIVATION_MODE_INDEPENDENT = 1
SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT = 2
SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT = 3
derivationMode = _messages.EnumField('DerivationModeValueValuesEnum', 1)
source = _messages.MessageField('Source', 2)
class Disk(_messages.Message):
"""Describes the data disk used by a workflow job.
Fields:
diskType: Disk storage type, as defined by Google Compute Engine. This
must be a disk type appropriate to the project and zone in which the
workers will run. If unknown or unspecified, the service will attempt
to choose a reasonable default. For example, the standard persistent
disk type is a resource name typically ending in "pd-standard". If SSD
persistent disks are available, the resource name typically ends with
"pd-ssd". The actual valid values are defined the Google Compute Engine
API, not by the Cloud Dataflow API; consult the Google Compute Engine
documentation for more information about determining the set of
available disk types for a particular project and zone. Google Compute
Engine Disk types are local to a particular project in a particular
zone, and so the resource name will typically look something like this:
compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-
standard
mountPoint: Directory in a VM where disk is mounted.
sizeGb: Size of disk in GB. If zero or unspecified, the service will
attempt to choose a reasonable default.
"""
diskType = _messages.StringField(1)
mountPoint = _messages.StringField(2)
sizeGb = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class DisplayData(_messages.Message):
"""Data provided with a pipeline or transform to provide descriptive info.
Fields:
boolValue: Contains value if the data is of a boolean type.
durationValue: Contains value if the data is of duration type.
floatValue: Contains value if the data is of float type.
int64Value: Contains value if the data is of int64 type.
javaClassValue: Contains value if the data is of java class type.
key: The key identifying the display data. This is intended to be used as
a label for the display data when viewed in a dax monitoring system.
label: An optional label to display in a dax UI for the element.
namespace: The namespace for the key. This is usually a class name or
programming language namespace (e.g. a Python module) which defines the
display data. This allows a dax monitoring system to specially handle
the data and perform custom rendering.
shortStrValue: A possible additional shorter value to display. For example
a java_class_name_value of com.mypackage.MyDoFn will be stored with
MyDoFn as the short_str_value and com.mypackage.MyDoFn as the
java_class_name value. short_str_value can be displayed and
java_class_name_value will be displayed as a tooltip.
strValue: Contains value if the data is of string type.
timestampValue: Contains value if the data is of timestamp type.
url: An optional full URL.
"""
boolValue = _messages.BooleanField(1)
durationValue = _messages.StringField(2)
floatValue = _messages.FloatField(3, variant=_messages.Variant.FLOAT)
int64Value = _messages.IntegerField(4)
javaClassValue = _messages.StringField(5)
key = _messages.StringField(6)
label = _messages.StringField(7)
namespace = _messages.StringField(8)
shortStrValue = _messages.StringField(9)
strValue = _messages.StringField(10)
timestampValue = _messages.StringField(11)
url = _messages.StringField(12)
class DistributionUpdate(_messages.Message):
"""A metric value representing a distribution.
Fields:
count: The count of the number of elements present in the distribution.
logBuckets: (Optional) Logarithmic histogram of values. Each log may be in
no more than one bucket. Order does not matter.
max: The maximum value present in the distribution.
min: The minimum value present in the distribution.
sum: Use an int64 since we'd prefer the added precision. If overflow is a
common problem we can detect it and use an additional int64 or a double.
sumOfSquares: Use a double since the sum of squares is likely to overflow
int64.
"""
count = _messages.MessageField('SplitInt64', 1)
logBuckets = _messages.MessageField('LogBucket', 2, repeated=True)
max = _messages.MessageField('SplitInt64', 3)
min = _messages.MessageField('SplitInt64', 4)
sum = _messages.MessageField('SplitInt64', 5)
sumOfSquares = _messages.FloatField(6)
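# Worked example (hypothetical numbers): with count=4, sum=10 and
# sumOfSquares=30 (e.g. the values 1, 2, 3, 4), the mean is 10 / 4 = 2.5 and
# the population variance is 30 / 4 - 2.5**2 = 1.25, which is why
# sumOfSquares is kept as a double to avoid int64 overflow.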
class DynamicSourceSplit(_messages.Message):
"""When a task splits using WorkItemStatus.dynamic_source_split, this
message describes the two parts of the split relative to the description of
the current task's input.
Fields:
primary: Primary part (continued to be processed by worker). Specified
relative to the previously-current source. Becomes current.
residual: Residual part (returned to the pool of work). Specified relative
to the previously-current source.
"""
primary = _messages.MessageField('DerivedSource', 1)
residual = _messages.MessageField('DerivedSource', 2)
class Environment(_messages.Message):
"""Describes the environment in which a Dataflow Job runs.
Messages:
InternalExperimentsValue: Experimental settings.
SdkPipelineOptionsValue: The Cloud Dataflow SDK pipeline options specified
by the user. These options are passed through the service and are used
to recreate the SDK pipeline options on the worker in a language
agnostic and platform independent way.
UserAgentValue: A description of the process that generated the request.
VersionValue: A structure describing which components and their versions
of the service are required in order to run the job.
Fields:
clusterManagerApiService: The type of cluster manager API to use. If
unknown or unspecified, the service will attempt to choose a reasonable
default. This should be in the form of the API service name, e.g.
"compute.googleapis.com".
dataset: The dataset for the current project where various workflow
related tables are stored. The supported resource type is: Google
BigQuery: bigquery.googleapis.com/{dataset}
experiments: The list of experiments to enable.
internalExperiments: Experimental settings.
sdkPipelineOptions: The Cloud Dataflow SDK pipeline options specified by
the user. These options are passed through the service and are used to
recreate the SDK pipeline options on the worker in a language agnostic
and platform independent way.
serviceAccountEmail: Identity to run virtual machines as. Defaults to the
default account.
tempStoragePrefix: The prefix of the resources the system should use for
temporary storage. The system will append the suffix "/temp-{JOBNAME}"
to this resource prefix, where {JOBNAME} is the value of the job_name
field. The resulting bucket and object prefix is used as the prefix of
the resources used to store temporary data needed during the job
execution. NOTE: This will override the value in taskrunner_settings.
The supported resource type is: Google Cloud Storage:
storage.googleapis.com/{bucket}/{object}
bucket.storage.googleapis.com/{object}
userAgent: A description of the process that generated the request.
version: A structure describing which components and their versions of the
service are required in order to run the job.
workerPools: The worker pools. At least one "harness" worker pool must be
specified in order for the job to have workers.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class InternalExperimentsValue(_messages.Message):
"""Experimental settings.
Messages:
AdditionalProperty: An additional property for an
InternalExperimentsValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a InternalExperimentsValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class SdkPipelineOptionsValue(_messages.Message):
"""The Cloud Dataflow SDK pipeline options specified by the user. These
options are passed through the service and are used to recreate the SDK
pipeline options on the worker in a language agnostic and platform
independent way.
Messages:
AdditionalProperty: An additional property for a SdkPipelineOptionsValue
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a SdkPipelineOptionsValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class UserAgentValue(_messages.Message):
"""A description of the process that generated the request.
Messages:
AdditionalProperty: An additional property for a UserAgentValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a UserAgentValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class VersionValue(_messages.Message):
"""A structure describing which components and their versions of the
service are required in order to run the job.
Messages:
AdditionalProperty: An additional property for a VersionValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a VersionValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterManagerApiService = _messages.StringField(1)
dataset = _messages.StringField(2)
experiments = _messages.StringField(3, repeated=True)
internalExperiments = _messages.MessageField('InternalExperimentsValue', 4)
sdkPipelineOptions = _messages.MessageField('SdkPipelineOptionsValue', 5)
serviceAccountEmail = _messages.StringField(6)
tempStoragePrefix = _messages.StringField(7)
userAgent = _messages.MessageField('UserAgentValue', 8)
version = _messages.MessageField('VersionValue', 9)
workerPools = _messages.MessageField('WorkerPool', 10, repeated=True)
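
# Hedged sketch: protorpc messages accept their fields as keyword arguments,
# so an Environment like the one documented above might be populated as
# follows. The bucket path, experiment flag, and service account are
# placeholders chosen for illustration, not values required by the service.
def _example_environment():
  return Environment(
      tempStoragePrefix='storage.googleapis.com/example-bucket/temp',
      experiments=['example_experiment_flag'],
      serviceAccountEmail='worker@example-project.iam.gserviceaccount.com')
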
class ExecutionStageState(_messages.Message):
"""A message describing the state of a particular execution stage.
Enums:
ExecutionStageStateValueValuesEnum: Execution stage states allow the same
set of values as JobState.
Fields:
currentStateTime: The time at which the stage transitioned to this state.
executionStageName: The name of the execution stage.
executionStageState: Execution stage states allow the same set of values
as JobState.
"""
class ExecutionStageStateValueValuesEnum(_messages.Enum):
"""Executions stage states allow the same set of values as JobState.
Values:
JOB_STATE_UNKNOWN: The job's run state isn't specified.
JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not
yet started to run.
JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is
currently running.
JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully
completed. This is a terminal job state. This state may be set by the
Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It
may also be set via a Cloud Dataflow `UpdateJob` call, if the job has
not yet reached a terminal state.
JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed.
This is a terminal job state. This state may only be set by the Cloud
Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has
been explicitly cancelled. This is a terminal job state. This state
may only be set via a Cloud Dataflow `UpdateJob` call, and only if the
job has not yet reached another terminal state.
JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was
successfully updated, meaning that this job was stopped and another
job was started, inheriting state from this one. This is a terminal
job state. This state may only be set by the Cloud Dataflow service,
and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in
the process of draining. A draining job has stopped pulling from its
input sources and is processing any data that remains in-flight. This
state may be set via a Cloud Dataflow `UpdateJob` call, but only as a
transition from `JOB_STATE_RUNNING`. Jobs that are draining may only
transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or
`JOB_STATE_FAILED`.
JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been
drained. A drained job terminated by stopping pulling from its input
sources and processing any data that remained in-flight when draining
was requested. This state is a terminal state, may only be set by the
Cloud Dataflow service, and only as a transition from
`JOB_STATE_DRAINING`.
JOB_STATE_PENDING: 'JOB_STATE_PENDING' indicates that the job has been
created but is not yet running. Jobs that are pending may only
transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.
JOB_STATE_CANCELLING: 'JOB_STATE_CANCELLING' indicates that the job has
been explicitly cancelled and is in the process of stopping. Jobs
that are cancelling may only transition to 'JOB_STATE_CANCELLED' or
'JOB_STATE_FAILED'.
"""
JOB_STATE_UNKNOWN = 0
JOB_STATE_STOPPED = 1
JOB_STATE_RUNNING = 2
JOB_STATE_DONE = 3
JOB_STATE_FAILED = 4
JOB_STATE_CANCELLED = 5
JOB_STATE_UPDATED = 6
JOB_STATE_DRAINING = 7
JOB_STATE_DRAINED = 8
JOB_STATE_PENDING = 9
JOB_STATE_CANCELLING = 10
currentStateTime = _messages.StringField(1)
executionStageName = _messages.StringField(2)
executionStageState = _messages.EnumField('ExecutionStageStateValueValuesEnum', 3)
class ExecutionStageSummary(_messages.Message):
"""Description of the composing transforms, names/ids, and input/outputs of
a stage of execution. Some composing transforms and sources may have been
generated by the Dataflow service during execution planning.
Enums:
KindValueValuesEnum: Type of transform this stage is executing.
Fields:
componentSource: Collections produced and consumed by component transforms
of this stage.
componentTransform: Transforms that comprise this execution stage.
id: Dataflow service generated id for this stage.
inputSource: Input sources for this stage.
kind: Type of transform this stage is executing.
name: Dataflow service generated name for this stage.
outputSource: Output sources for this stage.
"""
class KindValueValuesEnum(_messages.Enum):
"""Type of tranform this stage is executing.
Values:
UNKNOWN_KIND: Unrecognized transform type.
PAR_DO_KIND: ParDo transform.
GROUP_BY_KEY_KIND: Group By Key transform.
FLATTEN_KIND: Flatten transform.
READ_KIND: Read transform.
WRITE_KIND: Write transform.
CONSTANT_KIND: Constructs from a constant value, such as with Create.of.
SINGLETON_KIND: Creates a Singleton view of a collection.
SHUFFLE_KIND: Opening or closing a shuffle session, often as part of a
GroupByKey.
"""
UNKNOWN_KIND = 0
PAR_DO_KIND = 1
GROUP_BY_KEY_KIND = 2
FLATTEN_KIND = 3
READ_KIND = 4
WRITE_KIND = 5
CONSTANT_KIND = 6
SINGLETON_KIND = 7
SHUFFLE_KIND = 8
componentSource = _messages.MessageField('ComponentSource', 1, repeated=True)
componentTransform = _messages.MessageField('ComponentTransform', 2, repeated=True)
id = _messages.StringField(3)
inputSource = _messages.MessageField('StageSource', 4, repeated=True)
kind = _messages.EnumField('KindValueValuesEnum', 5)
name = _messages.StringField(6)
outputSource = _messages.MessageField('StageSource', 7, repeated=True)
class FailedLocation(_messages.Message):
"""Indicates which location failed to respond to a request for data.
Fields:
name: The name of the failed location.
"""
name = _messages.StringField(1)
class FlattenInstruction(_messages.Message):
"""An instruction that copies its inputs (zero or more) to its (single)
output.
Fields:
inputs: Describes the inputs to the flatten instruction.
"""
inputs = _messages.MessageField('InstructionInput', 1, repeated=True)
class FloatingPointList(_messages.Message):
"""A metric value representing a list of floating point numbers.
Fields:
elements: Elements of the list.
"""
elements = _messages.FloatField(1, repeated=True)
class FloatingPointMean(_messages.Message):
"""A representation of a floating point mean metric contribution.
Fields:
count: The number of values being aggregated.
sum: The sum of all values being aggregated.
"""
count = _messages.MessageField('SplitInt64', 1)
sum = _messages.FloatField(2)
class GetDebugConfigRequest(_messages.Message):
"""Request to get updated debug configuration for component.
Fields:
componentId: The internal component id for which debug configuration is
requested.
location: The location which contains the job specified by job_id.
workerId: The worker id, i.e., VM hostname.
"""
componentId = _messages.StringField(1)
location = _messages.StringField(2)
workerId = _messages.StringField(3)
class GetDebugConfigResponse(_messages.Message):
"""Response to a get debug configuration request.
Fields:
config: The encoded debug configuration for the requested component.
"""
config = _messages.StringField(1)
class GetTemplateResponse(_messages.Message):
"""The response to a GetTemplate request.
Fields:
metadata: The template metadata describing the template name, available
parameters, etc.
status: The status of the get template request. Any problems with the
request will be indicated in the error_details.
"""
metadata = _messages.MessageField('TemplateMetadata', 1)
status = _messages.MessageField('Status', 2)
class InstructionInput(_messages.Message):
"""An input of an instruction, as a reference to an output of a producer
instruction.
Fields:
outputNum: The output index (origin zero) within the producer.
producerInstructionIndex: The index (origin zero) of the parallel
instruction that produces the output to be consumed by this input. This
index is relative to the list of instructions in this input's
instruction's containing MapTask.
"""
outputNum = _messages.IntegerField(1, variant=_messages.Variant.INT32)
producerInstructionIndex = _messages.IntegerField(2, variant=_messages.Variant.INT32)
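
# Minimal sketch of how the (producerInstructionIndex, outputNum) pair above
# is resolved: both indices are zero-based positions into the containing
# MapTask's instruction list and into that producer's outputs (MapTask,
# ParallelInstruction and InstructionOutput are defined later in this module).
def _example_resolve_instruction_input(map_task, instruction_input):
  """Returns the InstructionOutput that an InstructionInput refers to."""
  producer = map_task.instructions[instruction_input.producerInstructionIndex]
  return producer.outputs[instruction_input.outputNum]
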
class InstructionOutput(_messages.Message):
"""An output of an instruction.
Messages:
CodecValue: The codec to use to encode data being written via this output.
Fields:
codec: The codec to use to encode data being written via this output.
name: The user-provided name of this output.
onlyCountKeyBytes: For system-generated byte and mean byte metrics,
certain instructions should only report the key size.
onlyCountValueBytes: For system-generated byte and mean byte metrics,
certain instructions should only report the value size.
originalName: System-defined name for this output in the original workflow
graph. Outputs that do not contribute to an original instruction do not
set this.
systemName: System-defined name of this output. Unique across the
workflow.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class CodecValue(_messages.Message):
"""The codec to use to encode data being written via this output.
Messages:
AdditionalProperty: An additional property for a CodecValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a CodecValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
codec = _messages.MessageField('CodecValue', 1)
name = _messages.StringField(2)
onlyCountKeyBytes = _messages.BooleanField(3)
onlyCountValueBytes = _messages.BooleanField(4)
originalName = _messages.StringField(5)
systemName = _messages.StringField(6)
class IntegerList(_messages.Message):
"""A metric value representing a list of integers.
Fields:
elements: Elements of the list.
"""
elements = _messages.MessageField('SplitInt64', 1, repeated=True)
class IntegerMean(_messages.Message):
"""A representation of an integer mean metric contribution.
Fields:
count: The number of values being aggregated.
sum: The sum of all values being aggregated.
"""
count = _messages.MessageField('SplitInt64', 1)
sum = _messages.MessageField('SplitInt64', 2)
class Job(_messages.Message):
"""Defines a job to be run by the Cloud Dataflow service.
Enums:
CurrentStateValueValuesEnum: The current state of the job. Jobs are
created in the `JOB_STATE_STOPPED` state unless otherwise specified. A
job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal
state. After a job has reached a terminal state, no further state
updates may be made. This field may be mutated by the Cloud Dataflow
service; callers cannot mutate it.
RequestedStateValueValuesEnum: The job's requested state. `UpdateJob` may
be used to switch between the `JOB_STATE_STOPPED` and
`JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may
also be used to directly set a job's requested state to
`JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the
job if it has not already reached a terminal state.
TypeValueValuesEnum: The type of Cloud Dataflow job.
Messages:
LabelsValue: User-defined labels for this job. The labels map can contain
no more than 64 entries. Entries of the labels map are UTF8 strings
that comply with the following restrictions: * Keys must conform to
regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp:
[\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally
constrained to be <= 128 bytes in size.
TransformNameMappingValue: The map of transform name prefixes of the job
to be replaced to the corresponding name prefixes of the new job.
Fields:
clientRequestId: The client's unique identifier of the job, re-used across
retried attempts. If this field is set, the service will ensure its
uniqueness. The request to create a job will fail if the service has
knowledge of a previously submitted job with the same client's ID and
job name. The caller may use this field to ensure idempotence of job
creation across retried attempts to create a job. By default, the field
is empty and, in that case, the service ignores it.
createTime: The timestamp when the job was initially created. Immutable
and set by the Cloud Dataflow service.
currentState: The current state of the job. Jobs are created in the
`JOB_STATE_STOPPED` state unless otherwise specified. A job in the
`JOB_STATE_RUNNING` state may asynchronously enter a terminal state.
After a job has reached a terminal state, no further state updates may
be made. This field may be mutated by the Cloud Dataflow service;
callers cannot mutate it.
currentStateTime: The timestamp associated with the current state.
environment: The environment for the job.
executionInfo: Deprecated.
id: The unique ID of this job. This field is set by the Cloud Dataflow
service when the Job is created, and is immutable for the life of the
job.
labels: User-defined labels for this job. The labels map can contain no
more than 64 entries. Entries of the labels map are UTF8 strings that
comply with the following restrictions: * Keys must conform to regexp:
\p{Ll}\p{Lo}{0,62} * Values must conform to regexp:
[\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally
constrained to be <= 128 bytes in size.
location: The location that contains this job.
name: The user-specified Cloud Dataflow job name. Only one Job with a
given name may exist in a project at any given time. If a caller
attempts to create a Job with the same name as an already-existing Job,
the attempt returns the existing Job. The name must match the regular
expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
pipelineDescription: Preliminary field: The format of this data may change
at any time. A description of the user pipeline and stages through which
it is executed. Created by Cloud Dataflow service. Only retrieved with
JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
projectId: The ID of the Cloud Platform project that the job belongs to.
replaceJobId: If this job is an update of an existing job, this field is
the job ID of the job it replaced. When sending a `CreateJobRequest`,
you can update a job by specifying it here. The job named here is
stopped, and its intermediate state is transferred to this job.
replacedByJobId: If another job is an update of this job (and thus, this
job is in `JOB_STATE_UPDATED`), this field contains the ID of that job.
requestedState: The job's requested state. `UpdateJob` may be used to
switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states,
by setting requested_state. `UpdateJob` may also be used to directly
set a job's requested state to `JOB_STATE_CANCELLED` or
`JOB_STATE_DONE`, irrevocably terminating the job if it has not already
reached a terminal state.
stageStates: This field may be mutated by the Cloud Dataflow service;
callers cannot mutate it.
steps: The top-level steps that constitute the entire job.
tempFiles: A set of files the system should be aware of that are used for
temporary storage. These temporary files will be removed on job
completion. No duplicates are allowed. No file patterns are supported.
The supported files are: Google Cloud Storage:
storage.googleapis.com/{bucket}/{object}
bucket.storage.googleapis.com/{object}
transformNameMapping: The map of transform name prefixes of the job to be
replaced to the corresponding name prefixes of the new job.
type: The type of Cloud Dataflow job.
"""
class CurrentStateValueValuesEnum(_messages.Enum):
"""The current state of the job. Jobs are created in the
`JOB_STATE_STOPPED` state unless otherwise specified. A job in the
`JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After
a job has reached a terminal state, no further state updates may be made.
This field may be mutated by the Cloud Dataflow service; callers cannot
mutate it.
Values:
JOB_STATE_UNKNOWN: The job's run state isn't specified.
JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not
yet started to run.
JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is
currently running.
JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully
completed. This is a terminal job state. This state may be set by the
Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It
may also be set via a Cloud Dataflow `UpdateJob` call, if the job has
not yet reached a terminal state.
JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed.
This is a terminal job state. This state may only be set by the Cloud
Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has
been explicitly cancelled. This is a terminal job state. This state
may only be set via a Cloud Dataflow `UpdateJob` call, and only if the
job has not yet reached another terminal state.
JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was
successfully updated, meaning that this job was stopped and another
job was started, inheriting state from this one. This is a terminal
job state. This state may only be set by the Cloud Dataflow service,
and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in
the process of draining. A draining job has stopped pulling from its
input sources and is processing any data that remains in-flight. This
state may be set via a Cloud Dataflow `UpdateJob` call, but only as a
transition from `JOB_STATE_RUNNING`. Jobs that are draining may only
transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or
`JOB_STATE_FAILED`.
JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been
drained. A drained job terminated by stopping pulling from its input
sources and processing any data that remained in-flight when draining
was requested. This state is a terminal state, may only be set by the
Cloud Dataflow service, and only as a transition from
`JOB_STATE_DRAINING`.
JOB_STATE_PENDING: 'JOB_STATE_PENDING' indicates that the job has been
created but is not yet running. Jobs that are pending may only
transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.
JOB_STATE_CANCELLING: 'JOB_STATE_CANCELLING' indicates that the job has
been explicitly cancelled and is in the process of stopping. Jobs
that are cancelling may only transition to 'JOB_STATE_CANCELLED' or
'JOB_STATE_FAILED'.
"""
JOB_STATE_UNKNOWN = 0
JOB_STATE_STOPPED = 1
JOB_STATE_RUNNING = 2
JOB_STATE_DONE = 3
JOB_STATE_FAILED = 4
JOB_STATE_CANCELLED = 5
JOB_STATE_UPDATED = 6
JOB_STATE_DRAINING = 7
JOB_STATE_DRAINED = 8
JOB_STATE_PENDING = 9
JOB_STATE_CANCELLING = 10
class RequestedStateValueValuesEnum(_messages.Enum):
"""The job's requested state. `UpdateJob` may be used to switch between
the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states, by setting
requested_state. `UpdateJob` may also be used to directly set a job's
requested state to `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably
terminating the job if it has not already reached a terminal state.
Values:
JOB_STATE_UNKNOWN: The job's run state isn't specified.
JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not
yet started to run.
JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is
currently running.
JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully
completed. This is a terminal job state. This state may be set by the
Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It
may also be set via a Cloud Dataflow `UpdateJob` call, if the job has
not yet reached a terminal state.
JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed.
This is a terminal job state. This state may only be set by the Cloud
Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has
been explicitly cancelled. This is a terminal job state. This state
may only be set via a Cloud Dataflow `UpdateJob` call, and only if the
job has not yet reached another terminal state.
JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was
successfully updated, meaning that this job was stopped and another
job was started, inheriting state from this one. This is a terminal
job state. This state may only be set by the Cloud Dataflow service,
and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in
the process of draining. A draining job has stopped pulling from its
input sources and is processing any data that remains in-flight. This
state may be set via a Cloud Dataflow `UpdateJob` call, but only as a
transition from `JOB_STATE_RUNNING`. Jobs that are draining may only
transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or
`JOB_STATE_FAILED`.
JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been
drained. A drained job terminated by stopping pulling from its input
sources and processing any data that remained in-flight when draining
was requested. This state is a terminal state, may only be set by the
Cloud Dataflow service, and only as a transition from
`JOB_STATE_DRAINING`.
JOB_STATE_PENDING: 'JOB_STATE_PENDING' indicates that the job has been
created but is not yet running. Jobs that are pending may only
transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.
JOB_STATE_CANCELLING: 'JOB_STATE_CANCELLING' indicates that the job has
been explicitly cancelled and is in the process of stopping. Jobs
that are cancelling may only transition to 'JOB_STATE_CANCELLED' or
'JOB_STATE_FAILED'.
"""
JOB_STATE_UNKNOWN = 0
JOB_STATE_STOPPED = 1
JOB_STATE_RUNNING = 2
JOB_STATE_DONE = 3
JOB_STATE_FAILED = 4
JOB_STATE_CANCELLED = 5
JOB_STATE_UPDATED = 6
JOB_STATE_DRAINING = 7
JOB_STATE_DRAINED = 8
JOB_STATE_PENDING = 9
JOB_STATE_CANCELLING = 10
class TypeValueValuesEnum(_messages.Enum):
"""The type of Cloud Dataflow job.
Values:
JOB_TYPE_UNKNOWN: The type of the job is unspecified, or unknown.
JOB_TYPE_BATCH: A batch job with a well-defined end point: data is read,
data is processed, data is written, and the job is done.
JOB_TYPE_STREAMING: A continuously streaming job with no end: data is
read, processed, and written continuously.
"""
JOB_TYPE_UNKNOWN = 0
JOB_TYPE_BATCH = 1
JOB_TYPE_STREAMING = 2
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""User-defined labels for this job. The labels map can contain no more
than 64 entries. Entries of the labels map are UTF8 strings that comply
with the following restrictions: * Keys must conform to regexp:
\p{Ll}\p{Lo}{0,62} * Values must conform to regexp:
[\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally
constrained to be <= 128 bytes in size.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class TransformNameMappingValue(_messages.Message):
"""The map of transform name prefixes of the job to be replaced to the
corresponding name prefixes of the new job.
Messages:
AdditionalProperty: An additional property for a
TransformNameMappingValue object.
Fields:
additionalProperties: Additional properties of type
TransformNameMappingValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a TransformNameMappingValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clientRequestId = _messages.StringField(1)
createTime = _messages.StringField(2)
currentState = _messages.EnumField('CurrentStateValueValuesEnum', 3)
currentStateTime = _messages.StringField(4)
environment = _messages.MessageField('Environment', 5)
executionInfo = _messages.MessageField('JobExecutionInfo', 6)
id = _messages.StringField(7)
labels = _messages.MessageField('LabelsValue', 8)
location = _messages.StringField(9)
name = _messages.StringField(10)
pipelineDescription = _messages.MessageField('PipelineDescription', 11)
projectId = _messages.StringField(12)
replaceJobId = _messages.StringField(13)
replacedByJobId = _messages.StringField(14)
requestedState = _messages.EnumField('RequestedStateValueValuesEnum', 15)
stageStates = _messages.MessageField('ExecutionStageState', 16, repeated=True)
steps = _messages.MessageField('Step', 17, repeated=True)
tempFiles = _messages.StringField(18, repeated=True)
transformNameMapping = _messages.MessageField('TransformNameMappingValue', 19)
type = _messages.EnumField('TypeValueValuesEnum', 20)
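
# Hedged sketch of constructing a minimal Job message with the enum and label
# machinery defined above. The project id, job name, and label values are
# placeholders; the label key and value follow the regexp restrictions in the
# LabelsValue docstring, and the job name matches the documented name pattern.
def _example_job():
  labels = Job.LabelsValue(additionalProperties=[
      Job.LabelsValue.AdditionalProperty(key='team', value='data')])
  return Job(
      name='example-wordcount',
      projectId='example-project',
      type=Job.TypeValueValuesEnum.JOB_TYPE_BATCH,
      labels=labels)
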
class JobExecutionInfo(_messages.Message):
"""Additional information about how a Cloud Dataflow job will be executed
that isn't contained in the submitted job.
Messages:
StagesValue: A mapping from each stage to the information about that
stage.
Fields:
stages: A mapping from each stage to the information about that stage.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class StagesValue(_messages.Message):
"""A mapping from each stage to the information about that stage.
Messages:
AdditionalProperty: An additional property for a StagesValue object.
Fields:
additionalProperties: Additional properties of type StagesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a StagesValue object.
Fields:
key: Name of the additional property.
value: A JobExecutionStageInfo attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('JobExecutionStageInfo', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
stages = _messages.MessageField('StagesValue', 1)
class JobExecutionStageInfo(_messages.Message):
"""Contains information about how a particular google.dataflow.v1beta3.Step
will be executed.
Fields:
stepName: The steps associated with the execution stage. Note that stages
may have several steps, and that a given step might be run by more than
one stage.
"""
stepName = _messages.StringField(1, repeated=True)
class JobMessage(_messages.Message):
"""A particular message pertaining to a Dataflow job.
Enums:
MessageImportanceValueValuesEnum: Importance level of the message.
Fields:
id: Deprecated.
messageImportance: Importance level of the message.
messageText: The text of the message.
time: The timestamp of the message.
"""
class MessageImportanceValueValuesEnum(_messages.Enum):
"""Importance level of the message.
Values:
JOB_MESSAGE_IMPORTANCE_UNKNOWN: The message importance isn't specified,
or is unknown.
JOB_MESSAGE_DEBUG: The message is at the 'debug' level: typically only
useful for software engineers working on the code the job is running.
Typically, Dataflow pipeline runners do not display log messages at
this level by default.
JOB_MESSAGE_DETAILED: The message is at the 'detailed' level: somewhat
verbose, but potentially useful to users. Typically, Dataflow
pipeline runners do not display log messages at this level by default.
These messages are displayed by default in the Dataflow monitoring UI.
JOB_MESSAGE_BASIC: The message is at the 'basic' level: useful for
keeping track of the execution of a Dataflow pipeline. Typically,
Dataflow pipeline runners display log messages at this level by
default, and these messages are displayed by default in the Dataflow
monitoring UI.
JOB_MESSAGE_WARNING: The message is at the 'warning' level: indicating a
condition pertaining to a job which may require human intervention.
Typically, Dataflow pipeline runners display log messages at this
level by default, and these messages are displayed by default in the
Dataflow monitoring UI.
JOB_MESSAGE_ERROR: The message is at the 'error' level: indicating a
condition preventing a job from succeeding. Typically, Dataflow
pipeline runners display log messages at this level by default, and
these messages are displayed by default in the Dataflow monitoring UI.
"""
JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
JOB_MESSAGE_DEBUG = 1
JOB_MESSAGE_DETAILED = 2
JOB_MESSAGE_BASIC = 3
JOB_MESSAGE_WARNING = 4
JOB_MESSAGE_ERROR = 5
id = _messages.StringField(1)
messageImportance = _messages.EnumField('MessageImportanceValueValuesEnum', 2)
messageText = _messages.StringField(3)
time = _messages.StringField(4)
class JobMetrics(_messages.Message):
"""JobMetrics contains a collection of metrics descibing the detailed
progress of a Dataflow job. Metrics correspond to user-defined and system-
defined metrics in the job. This resource captures only the most recent
values of each metric; time-series data can be queried for them (under the
same metric names) from Cloud Monitoring.
Fields:
metricTime: Timestamp as of which metric values are current.
metrics: All metrics for this job.
"""
metricTime = _messages.StringField(1)
metrics = _messages.MessageField('MetricUpdate', 2, repeated=True)
class KeyRangeDataDiskAssignment(_messages.Message):
"""Data disk assignment information for a specific key-range of a sharded
computation. Currently we only support UTF-8 character splits to simplify
encoding into JSON.
Fields:
dataDisk: The name of the data disk where data for this range is stored.
This name is local to the Google Cloud Platform project and uniquely
identifies the disk within that project, for example
"myproject-1014-104817-4c2-harness-0-disk-1".
end: The end (exclusive) of the key range.
start: The start (inclusive) of the key range.
"""
dataDisk = _messages.StringField(1)
end = _messages.StringField(2)
start = _messages.StringField(3)
class KeyRangeLocation(_messages.Message):
"""Location information for a specific key-range of a sharded computation.
Currently we only support UTF-8 character splits to simplify encoding into
JSON.
Fields:
dataDisk: The name of the data disk where data for this range is stored.
This name is local to the Google Cloud Platform project and uniquely
identifies the disk within that project, for example
"myproject-1014-104817-4c2-harness-0-disk-1".
deliveryEndpoint: The physical location of this range assignment to be
used for streaming computation cross-worker message delivery.
deprecatedPersistentDirectory: DEPRECATED. The location of the persistent
state for this range, as a persistent directory in the worker local
filesystem.
end: The end (exclusive) of the key range.
start: The start (inclusive) of the key range.
"""
dataDisk = _messages.StringField(1)
deliveryEndpoint = _messages.StringField(2)
deprecatedPersistentDirectory = _messages.StringField(3)
end = _messages.StringField(4)
start = _messages.StringField(5)
class LaunchTemplateParameters(_messages.Message):
"""Parameters to provide to the template being launched.
Messages:
ParametersValue: The runtime parameters to pass to the job.
Fields:
environment: The runtime environment for the job.
jobName: Required. The job name to use for the created job.
parameters: The runtime parameters to pass to the job.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParametersValue(_messages.Message):
"""The runtime parameters to pass to the job.
Messages:
AdditionalProperty: An additional property for a ParametersValue object.
Fields:
additionalProperties: Additional properties of type ParametersValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ParametersValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
environment = _messages.MessageField('RuntimeEnvironment', 1)
jobName = _messages.StringField(2)
parameters = _messages.MessageField('ParametersValue', 3)
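
# Hedged sketch of filling in LaunchTemplateParameters for a templated job.
# The job name, parameter key, and bucket paths are placeholders; the
# RuntimeEnvironment message used here is defined later in this module.
def _example_launch_template_parameters():
  params = LaunchTemplateParameters.ParametersValue(additionalProperties=[
      LaunchTemplateParameters.ParametersValue.AdditionalProperty(
          key='inputFile', value='gs://example-bucket/input.txt')])
  return LaunchTemplateParameters(
      jobName='example-template-run',
      parameters=params,
      environment=RuntimeEnvironment(
          tempLocation='gs://example-bucket/temp',
          maxWorkers=3))
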
class LaunchTemplateResponse(_messages.Message):
"""Response to the request to launch a template.
Fields:
job: The job that was launched, if the request was not a dry run and the
job was successfully launched.
"""
job = _messages.MessageField('Job', 1)
class LeaseWorkItemRequest(_messages.Message):
"""Request to lease WorkItems.
Fields:
currentWorkerTime: The current timestamp at the worker.
location: The location which contains the WorkItem's job.
requestedLeaseDuration: The initial lease period.
workItemTypes: Filter for WorkItem type.
workerCapabilities: Worker capabilities. WorkItems might be limited to
workers with specific capabilities.
workerId: Identifies the worker leasing work -- typically the ID of the
virtual machine running the worker.
"""
currentWorkerTime = _messages.StringField(1)
location = _messages.StringField(2)
requestedLeaseDuration = _messages.StringField(3)
workItemTypes = _messages.StringField(4, repeated=True)
workerCapabilities = _messages.StringField(5, repeated=True)
workerId = _messages.StringField(6)
class LeaseWorkItemResponse(_messages.Message):
"""Response to a request to lease WorkItems.
Fields:
workItems: A list of the leased WorkItems.
"""
workItems = _messages.MessageField('WorkItem', 1, repeated=True)
class ListJobMessagesResponse(_messages.Message):
"""Response to a request to list job messages.
Fields:
autoscalingEvents: Autoscaling events in ascending timestamp order.
jobMessages: Messages in ascending timestamp order.
nextPageToken: The token to obtain the next page of results if there are
more.
"""
autoscalingEvents = _messages.MessageField('AutoscalingEvent', 1, repeated=True)
jobMessages = _messages.MessageField('JobMessage', 2, repeated=True)
nextPageToken = _messages.StringField(3)
class ListJobsResponse(_messages.Message):
"""Response to a request to list Cloud Dataflow jobs. This may be a partial
response, depending on the page size in the ListJobsRequest.
Fields:
failedLocation: Zero or more messages describing locations that failed to
respond.
jobs: A subset of the requested job information.
nextPageToken: Set if there may be more results than fit in this response.
"""
failedLocation = _messages.MessageField('FailedLocation', 1, repeated=True)
jobs = _messages.MessageField('Job', 2, repeated=True)
nextPageToken = _messages.StringField(3)
class LogBucket(_messages.Message):
"""Bucket of values for Distribution's logarithmic histogram.
Fields:
count: Number of values in this bucket.
log: floor(log2(value)); defined to be zero for nonpositive values.
log(-1) = 0, log(0) = 0, log(1) = 0, log(2) = 1, log(3) = 1,
log(4) = 2, log(5) = 2.
"""
count = _messages.IntegerField(1)
log = _messages.IntegerField(2, variant=_messages.Variant.INT32)
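
# Small illustrative helper (not part of the generated API) for the bucketing
# formula documented on LogBucket.log: floor(log2(value)), clamped to zero
# for nonpositive values.
def _example_log_bucket_index(value):
  """Returns floor(log2(value)), or 0 for nonpositive values."""
  import math  # local import keeps this illustrative helper self-contained
  if value <= 0:
    return 0
  # frexp(value) returns (m, e) with value == m * 2**e and 0.5 <= m < 1,
  # so floor(log2(value)) == e - 1, avoiding floating-point log error.
  return math.frexp(value)[1] - 1
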
class MapTask(_messages.Message):
"""MapTask consists of an ordered set of instructions, each of which
describes one particular low-level operation for the worker to perform in
order to accomplish the MapTask's WorkItem. Each instruction must appear in
the list before any instructions which depend on its output.
Fields:
instructions: The instructions in the MapTask.
stageName: System-defined name of the stage containing this MapTask.
Unique across the workflow.
systemName: System-defined name of this MapTask. Unique across the
workflow.
"""
instructions = _messages.MessageField('ParallelInstruction', 1, repeated=True)
stageName = _messages.StringField(2)
systemName = _messages.StringField(3)
class MetricShortId(_messages.Message):
"""The metric short id is returned to the user alongside an offset into
ReportWorkItemStatusRequest
Fields:
metricIndex: The index of the corresponding metric in the
ReportWorkItemStatusRequest. Required.
shortId: The service-generated short identifier for the metric.
"""
metricIndex = _messages.IntegerField(1, variant=_messages.Variant.INT32)
shortId = _messages.IntegerField(2)
class MetricStructuredName(_messages.Message):
"""Identifies a metric, by describing the source which generated the metric.
Messages:
ContextValue: Zero or more labeled fields which identify the part of the
job this metric is associated with, such as the name of a step or
collection. For example, built-in counters associated with steps will
have context['step'] = <step-name>. Counters associated with
PCollections in the SDK will have context['pcollection'] = <pcollection-
name>.
Fields:
context: Zero or more labeled fields which identify the part of the job
this metric is associated with, such as the name of a step or
collection. For example, built-in counters associated with steps will
have context['step'] = <step-name>. Counters associated with
PCollections in the SDK will have context['pcollection'] = <pcollection-
name>.
name: Worker-defined metric name.
origin: Origin (namespace) of metric name. May be blank for user-defined
metrics; will be "dataflow" for metrics defined by the Dataflow service
or SDK.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ContextValue(_messages.Message):
"""Zero or more labeled fields which identify the part of the job this
metric is associated with, such as the name of a step or collection. For
example, built-in counters associated with steps will have context['step']
= <step-name>. Counters associated with PCollections in the SDK will have
context['pcollection'] = <pcollection-name>.
Messages:
AdditionalProperty: An additional property for a ContextValue object.
Fields:
additionalProperties: Additional properties of type ContextValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ContextValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
context = _messages.MessageField('ContextValue', 1)
name = _messages.StringField(2)
origin = _messages.StringField(3)
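
# Hedged sketch of building a MetricStructuredName for a metric attached to a
# step, following the context['step'] convention described above. The step
# and metric names are caller-supplied placeholders; origin is set to
# 'dataflow' as documented for service/SDK-defined metrics.
def _example_metric_structured_name(step_name, metric_name):
  context = MetricStructuredName.ContextValue(additionalProperties=[
      MetricStructuredName.ContextValue.AdditionalProperty(
          key='step', value=step_name)])
  return MetricStructuredName(
      context=context, name=metric_name, origin='dataflow')
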
class MetricUpdate(_messages.Message):
"""Describes the state of a metric.
Fields:
cumulative: True if this metric is reported as the total cumulative
aggregate value accumulated since the worker started working on this
WorkItem. By default this is false, indicating that this metric is
reported as a delta that is not associated with any WorkItem.
distribution: A struct value describing properties of a distribution of
numeric values.
internal: Worker-computed aggregate value for internal use by the Dataflow
service.
kind: Metric aggregation kind. The possible metric aggregation kinds are
"Sum", "Max", "Min", "Mean", "Set", "And", "Or", and "Distribution". The
specified aggregation kind is case-insensitive. If omitted, this is not
an aggregated value but instead a single metric sample value.
meanCount: Worker-computed aggregate value for the "Mean" aggregation
kind. This holds the count of the aggregated values and is used in
combination with mean_sum above to obtain the actual mean aggregate
value. The only possible value type is Long.
meanSum: Worker-computed aggregate value for the "Mean" aggregation kind.
This holds the sum of the aggregated values and is used in combination
with mean_count below to obtain the actual mean aggregate value. The
only possible value types are Long and Double.
name: Name of the metric.
scalar: Worker-computed aggregate value for aggregation kinds "Sum",
"Max", "Min", "And", and "Or". The possible value types are Long,
Double, and Boolean.
set: Worker-computed aggregate value for the "Set" aggregation kind. The
only possible value type is a list of Values whose type can be Long,
Double, or String, according to the metric's type. All Values in the
list must be of the same type.
updateTime: Timestamp associated with the metric value. Optional when
workers are reporting work progress; it will be filled in responses from
the metrics API.
"""
cumulative = _messages.BooleanField(1)
distribution = _messages.MessageField('extra_types.JsonValue', 2)
internal = _messages.MessageField('extra_types.JsonValue', 3)
kind = _messages.StringField(4)
meanCount = _messages.MessageField('extra_types.JsonValue', 5)
meanSum = _messages.MessageField('extra_types.JsonValue', 6)
name = _messages.MessageField('MetricStructuredName', 7)
scalar = _messages.MessageField('extra_types.JsonValue', 8)
set = _messages.MessageField('extra_types.JsonValue', 9)
updateTime = _messages.StringField(10)
class MountedDataDisk(_messages.Message):
"""Describes mounted data disk.
Fields:
dataDisk: The name of the data disk. This name is local to the Google
Cloud Platform project and uniquely identifies the disk within that
project, for example "myproject-1014-104817-4c2-harness-0-disk-1".
"""
dataDisk = _messages.StringField(1)
class MultiOutputInfo(_messages.Message):
"""Information about an output of a multi-output DoFn.
Fields:
tag: The id of the tag the user code will emit to this output by; this
should correspond to the tag of some SideInputInfo.
"""
tag = _messages.StringField(1)
class NameAndKind(_messages.Message):
"""Basic metadata about a counter.
Enums:
KindValueValuesEnum: Counter aggregation kind.
Fields:
kind: Counter aggregation kind.
name: Name of the counter.
"""
class KindValueValuesEnum(_messages.Enum):
"""Counter aggregation kind.
Values:
INVALID: Counter aggregation kind was not set.
SUM: Aggregated value is the sum of all contributed values.
MAX: Aggregated value is the max of all contributed values.
MIN: Aggregated value is the min of all contributed values.
MEAN: Aggregated value is the mean of all contributed values.
OR: Aggregated value represents the logical 'or' of all contributed
values.
AND: Aggregated value represents the logical 'and' of all contributed
values.
SET: Aggregated value is a set of unique contributed values.
DISTRIBUTION: Aggregated value captures statistics about a distribution.
"""
INVALID = 0
SUM = 1
MAX = 2
MIN = 3
MEAN = 4
OR = 5
AND = 6
SET = 7
DISTRIBUTION = 8
kind = _messages.EnumField('KindValueValuesEnum', 1)
name = _messages.StringField(2)
class Package(_messages.Message):
"""The packages that must be installed in order for a worker to run the
steps of the Cloud Dataflow job that will be assigned to its worker pool.
This is the mechanism by which the Cloud Dataflow SDK causes code to be
loaded onto the workers. For example, the Cloud Dataflow Java SDK might use
this to install jars containing the user's code and all of the various
dependencies (libraries, data files, etc.) required in order for that code
to run.
Fields:
location: The resource to read the package from. The supported resource
type is: Google Cloud Storage: storage.googleapis.com/{bucket}
bucket.storage.googleapis.com/
name: The name of the package.
"""
location = _messages.StringField(1)
name = _messages.StringField(2)
class ParDoInstruction(_messages.Message):
"""An instruction that does a ParDo operation. Takes one main input and zero
or more side inputs, and produces zero or more outputs. Runs user code.
Messages:
UserFnValue: The user function to invoke.
Fields:
input: The input.
multiOutputInfos: Information about each of the outputs, if user_fn is a
MultiDoFn.
numOutputs: The number of outputs.
sideInputs: Zero or more side inputs.
userFn: The user function to invoke.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class UserFnValue(_messages.Message):
"""The user function to invoke.
Messages:
AdditionalProperty: An additional property for a UserFnValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a UserFnValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
input = _messages.MessageField('InstructionInput', 1)
multiOutputInfos = _messages.MessageField('MultiOutputInfo', 2, repeated=True)
numOutputs = _messages.IntegerField(3, variant=_messages.Variant.INT32)
sideInputs = _messages.MessageField('SideInputInfo', 4, repeated=True)
userFn = _messages.MessageField('UserFnValue', 5)
class ParallelInstruction(_messages.Message):
"""Describes a particular operation comprising a MapTask.
Fields:
flatten: Additional information for Flatten instructions.
name: User-provided name of this operation.
originalName: System-defined name for the operation in the original
workflow graph.
outputs: Describes the outputs of the instruction.
parDo: Additional information for ParDo instructions.
partialGroupByKey: Additional information for PartialGroupByKey
instructions.
read: Additional information for Read instructions.
systemName: System-defined name of this operation. Unique across the
workflow.
write: Additional information for Write instructions.
"""
flatten = _messages.MessageField('FlattenInstruction', 1)
name = _messages.StringField(2)
originalName = _messages.StringField(3)
outputs = _messages.MessageField('InstructionOutput', 4, repeated=True)
parDo = _messages.MessageField('ParDoInstruction', 5)
partialGroupByKey = _messages.MessageField('PartialGroupByKeyInstruction', 6)
read = _messages.MessageField('ReadInstruction', 7)
systemName = _messages.StringField(8)
write = _messages.MessageField('WriteInstruction', 9)
class Parameter(_messages.Message):
"""Structured data associated with this message.
Fields:
key: Key or name for this parameter.
value: Value for this parameter.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
class ParameterMetadata(_messages.Message):
"""Metadata for a specific parameter.
Fields:
helpText: Required. The help text to display for the parameter.
isOptional: Optional. Whether the parameter is optional. Defaults to
false.
label: Required. The label to display for the parameter.
name: Required. The name of the parameter.
regexes: Optional. Regexes that the parameter must match.
"""
helpText = _messages.StringField(1)
isOptional = _messages.BooleanField(2)
label = _messages.StringField(3)
name = _messages.StringField(4)
regexes = _messages.StringField(5, repeated=True)
class PartialGroupByKeyInstruction(_messages.Message):
"""An instruction that does a partial group-by-key. One input and one
output.
Messages:
InputElementCodecValue: The codec to use for interpreting an element in
the input PTable.
ValueCombiningFnValue: The value combining function to invoke.
Fields:
input: Describes the input to the partial group-by-key instruction.
inputElementCodec: The codec to use for interpreting an element in the
input PTable.
originalCombineValuesInputStoreName: If this instruction includes a
combining function this is the name of the intermediate store between
the GBK and the CombineValues.
originalCombineValuesStepName: If this instruction includes a combining
function, this is the name of the CombineValues instruction lifted into
this instruction.
sideInputs: Zero or more side inputs.
valueCombiningFn: The value combining function to invoke.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class InputElementCodecValue(_messages.Message):
"""The codec to use for interpreting an element in the input PTable.
Messages:
AdditionalProperty: An additional property for an InputElementCodecValue
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a InputElementCodecValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ValueCombiningFnValue(_messages.Message):
"""The value combining function to invoke.
Messages:
AdditionalProperty: An additional property for a ValueCombiningFnValue
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ValueCombiningFnValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
input = _messages.MessageField('InstructionInput', 1)
inputElementCodec = _messages.MessageField('InputElementCodecValue', 2)
originalCombineValuesInputStoreName = _messages.StringField(3)
originalCombineValuesStepName = _messages.StringField(4)
sideInputs = _messages.MessageField('SideInputInfo', 5, repeated=True)
valueCombiningFn = _messages.MessageField('ValueCombiningFnValue', 6)
class PipelineDescription(_messages.Message):
"""A descriptive representation of submitted pipeline as well as the
executed form. This data is provided by the Dataflow service for ease of
visualizing the pipeline and interpreting Dataflow-provided metrics.
Fields:
displayData: Pipeline level display data.
executionPipelineStage: Description of each stage of execution of the
pipeline.
originalPipelineTransform: Description of each transform in the pipeline
and collections between them.
"""
displayData = _messages.MessageField('DisplayData', 1, repeated=True)
executionPipelineStage = _messages.MessageField('ExecutionStageSummary', 2, repeated=True)
originalPipelineTransform = _messages.MessageField('TransformSummary', 3, repeated=True)
class Position(_messages.Message):
"""Position defines a position within a collection of data. The value can
be either the end position, a key (used with ordered collections), a byte
offset, or a record index.
Fields:
byteOffset: Position is a byte offset.
concatPosition: CloudPosition is a concat position.
end: Position is past all other positions. Also useful for the end
position of an unbounded range.
key: Position is a string key, ordered lexicographically.
recordIndex: Position is a record index.
shufflePosition: CloudPosition is a base64 encoded BatchShufflePosition
(with FIXED sharding).
"""
byteOffset = _messages.IntegerField(1)
concatPosition = _messages.MessageField('ConcatPosition', 2)
end = _messages.BooleanField(3)
key = _messages.StringField(4)
recordIndex = _messages.IntegerField(5)
shufflePosition = _messages.StringField(6)
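
# Illustrative sketch: the Position message above acts as a union, so a
# caller typically sets exactly one of its fields, e.g. a byte offset while
# reading a file-like source, or end=True for the position past all others.
def _example_positions():
  return [Position(byteOffset=1024), Position(end=True)]
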
class PubsubLocation(_messages.Message):
"""Identifies a pubsub location to use for transferring data into or out of
a streaming Dataflow job.
Fields:
dropLateData: Indicates whether the pipeline allows late-arriving data.
idLabel: If set, contains a pubsub label from which to extract record ids.
If left empty, record deduplication will be strictly best effort.
subscription: A pubsub subscription, in the form of
"pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>"
timestampLabel: If set, contains a pubsub label from which to extract
record timestamps. If left empty, record timestamps will be generated
upon arrival.
topic: A pubsub topic, in the form of
"pubsub.googleapis.com/topics/<project-id>/<topic-name>"
trackingSubscription: If set, specifies the pubsub subscription that will
be used for tracking custom time timestamps for watermark estimation.
withAttributes: If true, then the client has requested to get pubsub
attributes.
"""
dropLateData = _messages.BooleanField(1)
idLabel = _messages.StringField(2)
subscription = _messages.StringField(3)
timestampLabel = _messages.StringField(4)
topic = _messages.StringField(5)
trackingSubscription = _messages.StringField(6)
withAttributes = _messages.BooleanField(7)
class ReadInstruction(_messages.Message):
"""An instruction that reads records. Takes no inputs, produces one output.
Fields:
source: The source to read from.
"""
source = _messages.MessageField('Source', 1)
class ReportWorkItemStatusRequest(_messages.Message):
"""Request to report the status of WorkItems.
Fields:
currentWorkerTime: The current timestamp at the worker.
location: The location which contains the WorkItem's job.
workItemStatuses: The order is unimportant, except that the order of the
WorkItemServiceState messages in the ReportWorkItemStatusResponse
corresponds to the order of WorkItemStatus messages here.
workerId: The ID of the worker reporting the WorkItem status. If this
does not match the ID of the worker which the Dataflow service believes
currently has the lease on the WorkItem, the report will be dropped
(with an error response).
"""
currentWorkerTime = _messages.StringField(1)
location = _messages.StringField(2)
workItemStatuses = _messages.MessageField('WorkItemStatus', 3, repeated=True)
workerId = _messages.StringField(4)
class ReportWorkItemStatusResponse(_messages.Message):
"""Response from a request to report the status of WorkItems.
Fields:
workItemServiceStates: A set of messages indicating the service-side state
for each WorkItem whose status was reported, in the same order as the
WorkItemStatus messages in the ReportWorkItemStatusRequest which
      resulted in this response.
"""
workItemServiceStates = _messages.MessageField('WorkItemServiceState', 1, repeated=True)
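
# Hedged sketch (not part of the generated bindings): as documented above, a
# ReportWorkItemStatusRequest and its ReportWorkItemStatusResponse are
# correlated purely by list order, so the i-th WorkItemServiceState describes
# the i-th WorkItemStatus that was reported.
def _pair_statuses_with_service_states(request, response):
  """Yield (WorkItemStatus, WorkItemServiceState) pairs matched by position."""
  return zip(request.workItemStatuses, response.workItemServiceStates)
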
class ReportedParallelism(_messages.Message):
"""Represents the level of parallelism in a WorkItem's input, reported by
the worker.
Fields:
isInfinite: Specifies whether the parallelism is infinite. If true,
"value" is ignored. Infinite parallelism means the service will assume
that the work item can always be split into more non-empty work items by
dynamic splitting. This is a work-around for lack of support for
infinity by the current JSON-based Java RPC stack.
value: Specifies the level of parallelism in case it is finite.
"""
isInfinite = _messages.BooleanField(1)
value = _messages.FloatField(2)
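
# Hedged sketch: ReportedParallelism encodes "infinite" with an explicit flag
# because the JSON-based RPC stack cannot carry float infinity (see the
# docstring above). The converter below is illustrative only.
def _to_reported_parallelism(parallelism):
  """Convert a float (possibly float('inf')) into a ReportedParallelism."""
  if parallelism == float('inf'):
    return ReportedParallelism(isInfinite=True)  # "value" is ignored
  return ReportedParallelism(isInfinite=False, value=float(parallelism))
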
class ResourceUtilizationReport(_messages.Message):
"""Worker metrics exported from workers. This contains resource utilization
  metrics accumulated from a variety of sources. For more information, see
  go/df-resource-signals.
Fields:
cpuTime: CPU utilization samples.
"""
cpuTime = _messages.MessageField('CPUTime', 1, repeated=True)
class ResourceUtilizationReportResponse(_messages.Message):
"""Service-side response to WorkerMessage reporting resource utilization.
"""
class RuntimeEnvironment(_messages.Message):
"""The environment values to set at runtime.
Fields:
bypassTempDirValidation: Whether to bypass the safety checks for the job's
temporary directory. Use with caution.
machineType: The machine type to use for the job. Defaults to the value
from the template if not specified.
maxWorkers: The maximum number of Google Compute Engine instances to be
made available to your pipeline during execution, from 1 to 1000.
serviceAccountEmail: The email address of the service account to run the
job as.
tempLocation: The Cloud Storage path to use for temporary files. Must be a
valid Cloud Storage URL, beginning with `gs://`.
zone: The Compute Engine [availability
zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)
for launching worker instances to run your pipeline.
"""
bypassTempDirValidation = _messages.BooleanField(1)
machineType = _messages.StringField(2)
maxWorkers = _messages.IntegerField(3, variant=_messages.Variant.INT32)
serviceAccountEmail = _messages.StringField(4)
tempLocation = _messages.StringField(5)
zone = _messages.StringField(6)
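
# Illustrative only: a minimal RuntimeEnvironment for launching a templated
# job, respecting the constraints documented above (tempLocation must be a
# gs:// URL, maxWorkers between 1 and 1000). Bucket and zone are hypothetical.
def _example_runtime_environment():
  return RuntimeEnvironment(
      tempLocation='gs://my-bucket/tmp',
      maxWorkers=10,
      zone='us-central1-f')
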
class SendDebugCaptureRequest(_messages.Message):
"""Request to send encoded debug information.
Fields:
componentId: The internal component id for which debug information is
sent.
data: The encoded debug information.
location: The location which contains the job specified by job_id.
workerId: The worker id, i.e., VM hostname.
"""
componentId = _messages.StringField(1)
data = _messages.StringField(2)
location = _messages.StringField(3)
workerId = _messages.StringField(4)
class SendDebugCaptureResponse(_messages.Message):
"""Response to a send capture request.
nothing"""
class SendWorkerMessagesRequest(_messages.Message):
"""A request for sending worker messages to the service.
Fields:
location: The location which contains the job
workerMessages: The WorkerMessages to send.
"""
location = _messages.StringField(1)
workerMessages = _messages.MessageField('WorkerMessage', 2, repeated=True)
class SendWorkerMessagesResponse(_messages.Message):
"""The response to the worker messages.
Fields:
    workerMessageResponses: The server's response to the worker messages.
"""
workerMessageResponses = _messages.MessageField('WorkerMessageResponse', 1, repeated=True)
class SeqMapTask(_messages.Message):
"""Describes a particular function to invoke.
Messages:
UserFnValue: The user function to invoke.
Fields:
inputs: Information about each of the inputs.
name: The user-provided name of the SeqDo operation.
outputInfos: Information about each of the outputs.
stageName: System-defined name of the stage containing the SeqDo
operation. Unique across the workflow.
systemName: System-defined name of the SeqDo operation. Unique across the
workflow.
userFn: The user function to invoke.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class UserFnValue(_messages.Message):
"""The user function to invoke.
Messages:
AdditionalProperty: An additional property for a UserFnValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a UserFnValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
inputs = _messages.MessageField('SideInputInfo', 1, repeated=True)
name = _messages.StringField(2)
outputInfos = _messages.MessageField('SeqMapTaskOutputInfo', 3, repeated=True)
stageName = _messages.StringField(4)
systemName = _messages.StringField(5)
userFn = _messages.MessageField('UserFnValue', 6)
class SeqMapTaskOutputInfo(_messages.Message):
"""Information about an output of a SeqMapTask.
Fields:
sink: The sink to write the output value to.
tag: The id of the TupleTag the user code will tag the output value by.
"""
sink = _messages.MessageField('Sink', 1)
tag = _messages.StringField(2)
class ShellTask(_messages.Message):
"""A task which consists of a shell command for the worker to execute.
Fields:
command: The shell command to run.
exitCode: Exit code for the task.
"""
command = _messages.StringField(1)
exitCode = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class SideInputInfo(_messages.Message):
"""Information about a side input of a DoFn or an input of a SeqDoFn.
Messages:
KindValue: How to interpret the source element(s) as a side input value.
Fields:
kind: How to interpret the source element(s) as a side input value.
sources: The source(s) to read element(s) from to get the value of this
side input. If more than one source, then the elements are taken from
the sources, in the specified order if order matters. At least one
source is required.
tag: The id of the tag the user code will access this side input by; this
should correspond to the tag of some MultiOutputInfo.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class KindValue(_messages.Message):
"""How to interpret the source element(s) as a side input value.
Messages:
AdditionalProperty: An additional property for a KindValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a KindValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
kind = _messages.MessageField('KindValue', 1)
sources = _messages.MessageField('Source', 2, repeated=True)
tag = _messages.StringField(3)
class Sink(_messages.Message):
"""A sink that records can be encoded and written to.
Messages:
CodecValue: The codec to use to encode data written to the sink.
SpecValue: The sink to write to, plus its parameters.
Fields:
codec: The codec to use to encode data written to the sink.
spec: The sink to write to, plus its parameters.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class CodecValue(_messages.Message):
"""The codec to use to encode data written to the sink.
Messages:
AdditionalProperty: An additional property for a CodecValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a CodecValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class SpecValue(_messages.Message):
"""The sink to write to, plus its parameters.
Messages:
AdditionalProperty: An additional property for a SpecValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a SpecValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
codec = _messages.MessageField('CodecValue', 1)
spec = _messages.MessageField('SpecValue', 2)
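
# Hedged sketch: the generated *Value classes above (CodecValue, SpecValue and
# similar) model free-form JSON objects as repeated AdditionalProperty entries
# wrapping extra_types.JsonValue. Assuming this module imports extra_types (the
# generated field definitions already reference it), a codec spec such as
# {"@type": "kind:bytes"} could be expressed as follows; the key/value pair is
# illustrative, not prescribed by the API.
def _example_sink_codec():
  return Sink.CodecValue(additionalProperties=[
      Sink.CodecValue.AdditionalProperty(
          key='@type',
          value=extra_types.JsonValue(string_value='kind:bytes')),
  ])
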
class Source(_messages.Message):
"""A source that records can be read and decoded from.
Messages:
BaseSpecsValueListEntry: A BaseSpecsValueListEntry object.
CodecValue: The codec to use to decode data read from the source.
SpecValue: The source to read from, plus its parameters.
Fields:
baseSpecs: While splitting, sources may specify the produced bundles as
differences against another source, in order to save backend-side memory
and allow bigger jobs. For details, see SourceSplitRequest. To support
this use case, the full set of parameters of the source is logically
obtained by taking the latest explicitly specified value of each
parameter in the order: base_specs (later items win), spec (overrides
anything in base_specs).
codec: The codec to use to decode data read from the source.
doesNotNeedSplitting: Setting this value to true hints to the framework
that the source doesn't need splitting, and using SourceSplitRequest on
it would yield SOURCE_SPLIT_OUTCOME_USE_CURRENT. E.g. a file splitter
may set this to true when splitting a single file into a set of byte
ranges of appropriate size, and set this to false when splitting a
filepattern into individual files. However, for efficiency, a file
splitter may decide to produce file subranges directly from the
filepattern to avoid a splitting round-trip. See SourceSplitRequest for
an overview of the splitting process. This field is meaningful only in
the Source objects populated by the user (e.g. when filling in a
DerivedSource). Source objects supplied by the framework to the user
don't have this field populated.
metadata: Optionally, metadata for this source can be supplied right away,
avoiding a SourceGetMetadataOperation roundtrip (see
SourceOperationRequest). This field is meaningful only in the Source
objects populated by the user (e.g. when filling in a DerivedSource).
Source objects supplied by the framework to the user don't have this
field populated.
spec: The source to read from, plus its parameters.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class BaseSpecsValueListEntry(_messages.Message):
"""A BaseSpecsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a BaseSpecsValueListEntry
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a BaseSpecsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class CodecValue(_messages.Message):
"""The codec to use to decode data read from the source.
Messages:
AdditionalProperty: An additional property for a CodecValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a CodecValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class SpecValue(_messages.Message):
"""The source to read from, plus its parameters.
Messages:
AdditionalProperty: An additional property for a SpecValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a SpecValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
baseSpecs = _messages.MessageField('BaseSpecsValueListEntry', 1, repeated=True)
codec = _messages.MessageField('CodecValue', 2)
doesNotNeedSplitting = _messages.BooleanField(3)
metadata = _messages.MessageField('SourceMetadata', 4)
spec = _messages.MessageField('SpecValue', 5)
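
# Hedged sketch of the parameter-resolution rule in the Source docstring: the
# effective spec is obtained by applying base_specs in order (later items win)
# and then overlaying spec on top. Shown over plain dicts for clarity;
# illustrative only, not part of the generated bindings.
def _effective_source_spec(base_specs, spec):
  merged = {}
  for base in base_specs:  # later items win
    merged.update(base)
  merged.update(spec)      # spec overrides anything in base_specs
  return merged
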
class SourceFork(_messages.Message):
"""DEPRECATED in favor of DynamicSourceSplit.
Fields:
primary: DEPRECATED
primarySource: DEPRECATED
residual: DEPRECATED
residualSource: DEPRECATED
"""
primary = _messages.MessageField('SourceSplitShard', 1)
primarySource = _messages.MessageField('DerivedSource', 2)
residual = _messages.MessageField('SourceSplitShard', 3)
residualSource = _messages.MessageField('DerivedSource', 4)
class SourceGetMetadataRequest(_messages.Message):
"""A request to compute the SourceMetadata of a Source.
Fields:
source: Specification of the source whose metadata should be computed.
"""
source = _messages.MessageField('Source', 1)
class SourceGetMetadataResponse(_messages.Message):
"""The result of a SourceGetMetadataOperation.
Fields:
metadata: The computed metadata.
"""
metadata = _messages.MessageField('SourceMetadata', 1)
class SourceMetadata(_messages.Message):
"""Metadata about a Source useful for automatically optimizing and tuning
the pipeline, etc.
Fields:
estimatedSizeBytes: An estimate of the total size (in bytes) of the data
that would be read from this source. This estimate is in terms of
external storage size, before any decompression or other processing done
by the reader.
infinite: Specifies that the size of this source is known to be infinite
(this is a streaming source).
producesSortedKeys: Whether this source is known to produce key/value
pairs with the (encoded) keys in lexicographically sorted order.
"""
estimatedSizeBytes = _messages.IntegerField(1)
infinite = _messages.BooleanField(2)
producesSortedKeys = _messages.BooleanField(3)
class SourceOperationRequest(_messages.Message):
"""A work item that represents the different operations that can be
performed on a user-defined Source specification.
Fields:
getMetadata: Information about a request to get metadata about a source.
split: Information about a request to split a source.
"""
getMetadata = _messages.MessageField('SourceGetMetadataRequest', 1)
split = _messages.MessageField('SourceSplitRequest', 2)
class SourceOperationResponse(_messages.Message):
"""The result of a SourceOperationRequest, specified in
ReportWorkItemStatusRequest.source_operation when the work item is
completed.
Fields:
getMetadata: A response to a request to get metadata about a source.
split: A response to a request to split a source.
"""
getMetadata = _messages.MessageField('SourceGetMetadataResponse', 1)
split = _messages.MessageField('SourceSplitResponse', 2)
class SourceSplitOptions(_messages.Message):
"""Hints for splitting a Source into bundles (parts for parallel processing)
using SourceSplitRequest.
Fields:
desiredBundleSizeBytes: The source should be split into a set of bundles
where the estimated size of each is approximately this many bytes.
desiredShardSizeBytes: DEPRECATED in favor of desired_bundle_size_bytes.
"""
desiredBundleSizeBytes = _messages.IntegerField(1)
desiredShardSizeBytes = _messages.IntegerField(2)
class SourceSplitRequest(_messages.Message):
"""Represents the operation to split a high-level Source specification into
bundles (parts for parallel processing). At a high level, splitting of a
source into bundles happens as follows: SourceSplitRequest is applied to the
source. If it returns SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting
happens and the source is used "as is". Otherwise, splitting is applied
recursively to each produced DerivedSource. As an optimization, for any
Source, if its does_not_need_splitting is true, the framework assumes that
splitting this source would return SOURCE_SPLIT_OUTCOME_USE_CURRENT, and
doesn't initiate a SourceSplitRequest. This applies both to the initial
source being split and to bundles produced from it.
Fields:
options: Hints for tuning the splitting process.
source: Specification of the source to be split.
"""
options = _messages.MessageField('SourceSplitOptions', 1)
source = _messages.MessageField('Source', 2)
class SourceSplitResponse(_messages.Message):
"""The response to a SourceSplitRequest.
Enums:
OutcomeValueValuesEnum: Indicates whether splitting happened and produced
a list of bundles. If this is USE_CURRENT_SOURCE_AS_IS, the current
source should be processed "as is" without splitting. "bundles" is
ignored in this case. If this is SPLITTING_HAPPENED, then "bundles"
contains a list of bundles into which the source was split.
Fields:
bundles: If outcome is SPLITTING_HAPPENED, then this is a list of bundles
into which the source was split. Otherwise this field is ignored. This
list can be empty, which means the source represents an empty input.
outcome: Indicates whether splitting happened and produced a list of
bundles. If this is USE_CURRENT_SOURCE_AS_IS, the current source should
be processed "as is" without splitting. "bundles" is ignored in this
case. If this is SPLITTING_HAPPENED, then "bundles" contains a list of
bundles into which the source was split.
shards: DEPRECATED in favor of bundles.
"""
class OutcomeValueValuesEnum(_messages.Enum):
"""Indicates whether splitting happened and produced a list of bundles. If
this is USE_CURRENT_SOURCE_AS_IS, the current source should be processed
"as is" without splitting. "bundles" is ignored in this case. If this is
SPLITTING_HAPPENED, then "bundles" contains a list of bundles into which
the source was split.
Values:
SOURCE_SPLIT_OUTCOME_UNKNOWN: The source split outcome is unknown, or
unspecified.
SOURCE_SPLIT_OUTCOME_USE_CURRENT: The current source should be processed
"as is" without splitting.
SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED: Splitting produced a list of
bundles.
"""
SOURCE_SPLIT_OUTCOME_UNKNOWN = 0
SOURCE_SPLIT_OUTCOME_USE_CURRENT = 1
SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED = 2
bundles = _messages.MessageField('DerivedSource', 1, repeated=True)
outcome = _messages.EnumField('OutcomeValueValuesEnum', 2)
shards = _messages.MessageField('SourceSplitShard', 3, repeated=True)
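
# Hedged sketch of the splitting flow described on SourceSplitRequest and
# SourceSplitResponse: if the outcome is "use current", the source is
# processed as is; otherwise splitting is applied recursively to each produced
# bundle. `perform_split` is a hypothetical stand-in for the actual service
# call, and the doesNotNeedSplitting short-circuit is omitted for brevity.
def _expand_into_bundles(source, perform_split):
  response = perform_split(SourceSplitRequest(source=source))
  outcomes = SourceSplitResponse.OutcomeValueValuesEnum
  if response.outcome == outcomes.SOURCE_SPLIT_OUTCOME_USE_CURRENT:
    return [source]
  bundles = []
  for derived in response.bundles:  # DerivedSource messages
    bundles.extend(_expand_into_bundles(derived.source, perform_split))
  return bundles
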
class SourceSplitShard(_messages.Message):
"""DEPRECATED in favor of DerivedSource.
Enums:
DerivationModeValueValuesEnum: DEPRECATED
Fields:
derivationMode: DEPRECATED
source: DEPRECATED
"""
class DerivationModeValueValuesEnum(_messages.Enum):
"""DEPRECATED
Values:
SOURCE_DERIVATION_MODE_UNKNOWN: The source derivation is unknown, or
unspecified.
SOURCE_DERIVATION_MODE_INDEPENDENT: Produce a completely independent
Source with no base.
SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT: Produce a Source based on the
Source being split.
SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT: Produce a Source based on the
base of the Source being split.
"""
SOURCE_DERIVATION_MODE_UNKNOWN = 0
SOURCE_DERIVATION_MODE_INDEPENDENT = 1
SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT = 2
SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT = 3
derivationMode = _messages.EnumField('DerivationModeValueValuesEnum', 1)
source = _messages.MessageField('Source', 2)
class SplitInt64(_messages.Message):
"""A representation of an int64, n, that is immune to precision loss when
encoded in JSON.
Fields:
highBits: The high order bits, including the sign: n >> 32.
lowBits: The low order bits: n & 0xffffffff.
"""
highBits = _messages.IntegerField(1, variant=_messages.Variant.INT32)
lowBits = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
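
# Hedged helpers mirroring the formulas in the SplitInt64 docstring (high bits
# are n >> 32 including the sign, low bits are n & 0xffffffff). Illustrative
# only; not part of the generated bindings.
def _int_to_split_int64(n):
  return SplitInt64(highBits=n >> 32, lowBits=n & 0xffffffff)

def _split_int64_to_int(split):
  return (split.highBits << 32) | split.lowBits
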
class StageSource(_messages.Message):
"""Description of an input or output of an execution stage.
Fields:
name: Dataflow service generated name for this source.
originalTransformOrCollection: User name for the original user transform
or collection with which this source is most closely associated.
sizeBytes: Size of the source, if measurable.
userName: Human-readable name for this source; may be user or system
generated.
"""
name = _messages.StringField(1)
originalTransformOrCollection = _messages.StringField(2)
sizeBytes = _messages.IntegerField(3)
userName = _messages.StringField(4)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class StateFamilyConfig(_messages.Message):
"""State family configuration.
Fields:
isRead: If true, this family corresponds to a read operation.
stateFamily: The state family value.
"""
isRead = _messages.BooleanField(1)
stateFamily = _messages.StringField(2)
class Status(_messages.Message):
"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). The error model is designed to be:
- Simple to use and understand for most users - Flexible enough to meet
unexpected needs # Overview The `Status` message contains three pieces of
data: error code, error message, and error details. The error code should be
an enum value of google.rpc.Code, but it may accept additional error codes
if needed. The error message should be a developer-facing English message
that helps developers *understand* and *resolve* the error. If a localized
user-facing error message is needed, put the localized message in the error
details or localize it in the client. The optional error details may contain
arbitrary information about the error. There is a predefined set of error
detail types in the package `google.rpc` that can be used for common error
conditions. # Language mapping The `Status` message is the logical
representation of the error model, but it is not necessarily the actual wire
format. When the `Status` message is exposed in different client libraries
and different wire protocols, it can be mapped differently. For example, it
will likely be mapped to some exceptions in Java, but more likely mapped to
some error codes in C. # Other uses The error model and the `Status`
message can be used in a variety of environments, either with or without
APIs, to provide a consistent developer experience across different
environments. Example uses of this error model include: - Partial errors.
If a service needs to return partial errors to the client, it may embed
the `Status` in the normal response to indicate the partial errors. -
Workflow errors. A typical workflow has multiple steps. Each step may
have a `Status` message for error reporting. - Batch operations. If a
client uses batch request and batch response, the `Status` message
should be used directly inside batch response, one for each error sub-
response. - Asynchronous operations. If an API call embeds asynchronous
operation results in its response, the status of those operations should
be represented directly using the `Status` message. - Logging. If some
API errors are stored in logs, the message `Status` could be used
directly after any stripping needed for security/privacy reasons.
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
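
# Illustrative only: a Status carrying a google.rpc error code and a
# developer-facing message, per the error model described above. The numeric
# code 3 corresponds to google.rpc.Code.INVALID_ARGUMENT.
def _example_invalid_argument_status(developer_message):
  return Status(code=3, message=developer_message)
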
class Step(_messages.Message):
"""Defines a particular step within a Cloud Dataflow job. A job consists of
multiple steps, each of which performs some specific operation as part of
the overall job. Data is typically passed from one step to another as part
of the job. Here's an example of a sequence of steps which together
implement a Map-Reduce job: * Read a collection of data from some source,
parsing the collection's elements. * Validate the elements. *
Apply a user-defined function to map each element to some value and
extract an element-specific key value. * Group elements with the same key
into a single element with that key, transforming a multiply-keyed
collection into a uniquely-keyed collection. * Write the elements out
to some data sink. Note that the Cloud Dataflow service may be used to run
many different types of jobs, not just Map-Reduce.
Messages:
PropertiesValue: Named properties associated with the step. Each kind of
predefined step has its own required set of properties. Must be provided
on Create. Only retrieved with JOB_VIEW_ALL.
Fields:
kind: The kind of step in the Cloud Dataflow job.
name: The name that identifies the step. This must be unique for each step
with respect to all other steps in the Cloud Dataflow job.
properties: Named properties associated with the step. Each kind of
predefined step has its own required set of properties. Must be provided
on Create. Only retrieved with JOB_VIEW_ALL.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
"""Named properties associated with the step. Each kind of predefined step
has its own required set of properties. Must be provided on Create. Only
retrieved with JOB_VIEW_ALL.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
kind = _messages.StringField(1)
name = _messages.StringField(2)
properties = _messages.MessageField('PropertiesValue', 3)
class StreamLocation(_messages.Message):
"""Describes a stream of data, either as input to be processed or as output
of a streaming Dataflow job.
Fields:
customSourceLocation: The stream is a custom source.
pubsubLocation: The stream is a pubsub stream.
sideInputLocation: The stream is a streaming side input.
streamingStageLocation: The stream is part of another computation within
the current streaming Dataflow job.
"""
customSourceLocation = _messages.MessageField('CustomSourceLocation', 1)
pubsubLocation = _messages.MessageField('PubsubLocation', 2)
sideInputLocation = _messages.MessageField('StreamingSideInputLocation', 3)
streamingStageLocation = _messages.MessageField('StreamingStageLocation', 4)
class StreamingComputationConfig(_messages.Message):
"""Configuration information for a single streaming computation.
Fields:
computationId: Unique identifier for this computation.
instructions: Instructions that comprise the computation.
stageName: Stage name of this computation.
systemName: System defined name for this computation.
"""
computationId = _messages.StringField(1)
instructions = _messages.MessageField('ParallelInstruction', 2, repeated=True)
stageName = _messages.StringField(3)
systemName = _messages.StringField(4)
class StreamingComputationRanges(_messages.Message):
"""Describes full or partial data disk assignment information of the
computation ranges.
Fields:
computationId: The ID of the computation.
rangeAssignments: Data disk assignments for ranges from this computation.
"""
computationId = _messages.StringField(1)
rangeAssignments = _messages.MessageField('KeyRangeDataDiskAssignment', 2, repeated=True)
class StreamingComputationTask(_messages.Message):
"""A task which describes what action should be performed for the specified
streaming computation ranges.
Enums:
TaskTypeValueValuesEnum: A type of streaming computation task.
Fields:
computationRanges: Contains ranges of a streaming computation this task
should apply to.
dataDisks: Describes the set of data disks this task should apply to.
taskType: A type of streaming computation task.
"""
class TaskTypeValueValuesEnum(_messages.Enum):
"""A type of streaming computation task.
Values:
STREAMING_COMPUTATION_TASK_UNKNOWN: The streaming computation task is
unknown, or unspecified.
STREAMING_COMPUTATION_TASK_STOP: Stop processing specified streaming
computation range(s).
STREAMING_COMPUTATION_TASK_START: Start processing specified streaming
computation range(s).
"""
STREAMING_COMPUTATION_TASK_UNKNOWN = 0
STREAMING_COMPUTATION_TASK_STOP = 1
STREAMING_COMPUTATION_TASK_START = 2
computationRanges = _messages.MessageField('StreamingComputationRanges', 1, repeated=True)
dataDisks = _messages.MessageField('MountedDataDisk', 2, repeated=True)
taskType = _messages.EnumField('TaskTypeValueValuesEnum', 3)
class StreamingConfigTask(_messages.Message):
"""A task that carries configuration information for streaming computations.
Messages:
UserStepToStateFamilyNameMapValue: Map from user step names to state
families.
Fields:
streamingComputationConfigs: Set of computation configuration information.
userStepToStateFamilyNameMap: Map from user step names to state families.
windmillServiceEndpoint: If present, the worker must use this endpoint to
communicate with Windmill Service dispatchers, otherwise the worker must
continue to use whatever endpoint it had been using.
windmillServicePort: If present, the worker must use this port to
communicate with Windmill Service dispatchers. Only applicable when
windmill_service_endpoint is specified.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class UserStepToStateFamilyNameMapValue(_messages.Message):
"""Map from user step names to state families.
Messages:
AdditionalProperty: An additional property for a
UserStepToStateFamilyNameMapValue object.
Fields:
additionalProperties: Additional properties of type
UserStepToStateFamilyNameMapValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a UserStepToStateFamilyNameMapValue
object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
streamingComputationConfigs = _messages.MessageField('StreamingComputationConfig', 1, repeated=True)
userStepToStateFamilyNameMap = _messages.MessageField('UserStepToStateFamilyNameMapValue', 2)
windmillServiceEndpoint = _messages.StringField(3)
windmillServicePort = _messages.IntegerField(4)
class StreamingSetupTask(_messages.Message):
"""A task which initializes part of a streaming Dataflow job.
Fields:
drain: The user has requested drain.
receiveWorkPort: The TCP port on which the worker should listen for
messages from other streaming computation workers.
streamingComputationTopology: The global topology of the streaming
Dataflow job.
workerHarnessPort: The TCP port used by the worker to communicate with the
Dataflow worker harness.
"""
drain = _messages.BooleanField(1)
receiveWorkPort = _messages.IntegerField(2, variant=_messages.Variant.INT32)
streamingComputationTopology = _messages.MessageField('TopologyConfig', 3)
workerHarnessPort = _messages.IntegerField(4, variant=_messages.Variant.INT32)
class StreamingSideInputLocation(_messages.Message):
"""Identifies the location of a streaming side input.
Fields:
stateFamily: Identifies the state family where this side input is stored.
tag: Identifies the particular side input within the streaming Dataflow
job.
"""
stateFamily = _messages.StringField(1)
tag = _messages.StringField(2)
class StreamingStageLocation(_messages.Message):
"""Identifies the location of a streaming computation stage, for stage-to-
stage communication.
Fields:
streamId: Identifies the particular stream within the streaming Dataflow
job.
"""
streamId = _messages.StringField(1)
class StringList(_messages.Message):
"""A metric value representing a list of strings.
Fields:
elements: Elements of the list.
"""
elements = _messages.StringField(1, repeated=True)
class StructuredMessage(_messages.Message):
"""A rich message format, including a human readable string, a key for
identifying the message, and structured data associated with the message for
programmatic consumption.
Fields:
    messageKey: Identifier for this message type. Used by external systems to
      internationalize or personalize the message.
messageText: Human-readable version of message.
parameters: The structured data associated with this message.
"""
messageKey = _messages.StringField(1)
messageText = _messages.StringField(2)
parameters = _messages.MessageField('Parameter', 3, repeated=True)
class TaskRunnerSettings(_messages.Message):
"""Taskrunner configuration settings.
Fields:
alsologtostderr: Whether to also send taskrunner log info to stderr.
baseTaskDir: The location on the worker for task-specific subdirectories.
baseUrl: The base URL for the taskrunner to use when accessing Google
Cloud APIs. When workers access Google Cloud APIs, they logically do so
via relative URLs. If this field is specified, it supplies the base URL
to use for resolving these relative URLs. The normative algorithm used
is defined by RFC 1808, "Relative Uniform Resource Locators". If not
specified, the default value is "http://www.googleapis.com/"
commandlinesFileName: The file to store preprocessing commands in.
continueOnException: Whether to continue taskrunner if an exception is
hit.
    dataflowApiVersion: The API version of the endpoint, e.g. "v1b3"
harnessCommand: The command to launch the worker harness.
languageHint: The suggested backend language.
logDir: The directory on the VM to store logs.
logToSerialconsole: Whether to send taskrunner log info to Google Compute
Engine VM serial console.
logUploadLocation: Indicates where to put logs. If this is not specified,
the logs will not be uploaded. The supported resource type is: Google
Cloud Storage: storage.googleapis.com/{bucket}/{object}
bucket.storage.googleapis.com/{object}
oauthScopes: The OAuth2 scopes to be requested by the taskrunner in order
to access the Cloud Dataflow API.
parallelWorkerSettings: The settings to pass to the parallel worker
harness.
streamingWorkerMainClass: The streaming worker main class name.
taskGroup: The UNIX group ID on the worker VM to use for tasks launched by
taskrunner; e.g. "wheel".
taskUser: The UNIX user ID on the worker VM to use for tasks launched by
taskrunner; e.g. "root".
tempStoragePrefix: The prefix of the resources the taskrunner should use
for temporary storage. The supported resource type is: Google Cloud
Storage: storage.googleapis.com/{bucket}/{object}
bucket.storage.googleapis.com/{object}
vmId: The ID string of the VM.
workflowFileName: The file to store the workflow in.
"""
alsologtostderr = _messages.BooleanField(1)
baseTaskDir = _messages.StringField(2)
baseUrl = _messages.StringField(3)
commandlinesFileName = _messages.StringField(4)
continueOnException = _messages.BooleanField(5)
dataflowApiVersion = _messages.StringField(6)
harnessCommand = _messages.StringField(7)
languageHint = _messages.StringField(8)
logDir = _messages.StringField(9)
logToSerialconsole = _messages.BooleanField(10)
logUploadLocation = _messages.StringField(11)
oauthScopes = _messages.StringField(12, repeated=True)
parallelWorkerSettings = _messages.MessageField('WorkerSettings', 13)
streamingWorkerMainClass = _messages.StringField(14)
taskGroup = _messages.StringField(15)
taskUser = _messages.StringField(16)
tempStoragePrefix = _messages.StringField(17)
vmId = _messages.StringField(18)
workflowFileName = _messages.StringField(19)
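
# Hedged sketch of how baseUrl is meant to be used (per the docstring above,
# relative Google Cloud API URLs resolve against it following RFC 1808, with
# "http://www.googleapis.com/" as the default). Illustrative only.
def _resolve_api_url(settings, relative_url):
  try:
    from urllib.parse import urljoin  # Python 3
  except ImportError:
    from urlparse import urljoin      # Python 2
  base = settings.baseUrl or 'http://www.googleapis.com/'
  return urljoin(base, relative_url)
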
class TemplateMetadata(_messages.Message):
"""Metadata describing a template.
Fields:
description: Optional. A description of the template.
name: Required. The name of the template.
parameters: The parameters for the template.
"""
description = _messages.StringField(1)
name = _messages.StringField(2)
parameters = _messages.MessageField('ParameterMetadata', 3, repeated=True)
class TopologyConfig(_messages.Message):
"""Global topology of the streaming Dataflow job, including all computations
and their sharded locations.
Messages:
UserStageToComputationNameMapValue: Maps user stage names to stable
computation names.
Fields:
computations: The computations associated with a streaming Dataflow job.
dataDiskAssignments: The disks assigned to a streaming Dataflow job.
forwardingKeyBits: The size (in bits) of keys that will be assigned to
source messages.
persistentStateVersion: Version number for persistent state.
userStageToComputationNameMap: Maps user stage names to stable computation
names.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class UserStageToComputationNameMapValue(_messages.Message):
"""Maps user stage names to stable computation names.
Messages:
AdditionalProperty: An additional property for a
UserStageToComputationNameMapValue object.
Fields:
additionalProperties: Additional properties of type
UserStageToComputationNameMapValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a UserStageToComputationNameMapValue
object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
computations = _messages.MessageField('ComputationTopology', 1, repeated=True)
dataDiskAssignments = _messages.MessageField('DataDiskAssignment', 2, repeated=True)
forwardingKeyBits = _messages.IntegerField(3, variant=_messages.Variant.INT32)
persistentStateVersion = _messages.IntegerField(4, variant=_messages.Variant.INT32)
userStageToComputationNameMap = _messages.MessageField('UserStageToComputationNameMapValue', 5)
class TransformSummary(_messages.Message):
"""Description of the type, names/ids, and input/outputs for a transform.
Enums:
KindValueValuesEnum: Type of transform.
Fields:
displayData: Transform-specific display data.
id: SDK generated id of this transform instance.
inputCollectionName: User names for all collection inputs to this
transform.
kind: Type of transform.
name: User provided name for this transform instance.
outputCollectionName: User names for all collection outputs to this
transform.
"""
class KindValueValuesEnum(_messages.Enum):
"""Type of transform.
Values:
UNKNOWN_KIND: Unrecognized transform type.
PAR_DO_KIND: ParDo transform.
GROUP_BY_KEY_KIND: Group By Key transform.
FLATTEN_KIND: Flatten transform.
READ_KIND: Read transform.
WRITE_KIND: Write transform.
CONSTANT_KIND: Constructs from a constant value, such as with Create.of.
SINGLETON_KIND: Creates a Singleton view of a collection.
SHUFFLE_KIND: Opening or closing a shuffle session, often as part of a
GroupByKey.
"""
UNKNOWN_KIND = 0
PAR_DO_KIND = 1
GROUP_BY_KEY_KIND = 2
FLATTEN_KIND = 3
READ_KIND = 4
WRITE_KIND = 5
CONSTANT_KIND = 6
SINGLETON_KIND = 7
SHUFFLE_KIND = 8
displayData = _messages.MessageField('DisplayData', 1, repeated=True)
id = _messages.StringField(2)
inputCollectionName = _messages.StringField(3, repeated=True)
kind = _messages.EnumField('KindValueValuesEnum', 4)
name = _messages.StringField(5)
outputCollectionName = _messages.StringField(6, repeated=True)
class WorkItem(_messages.Message):
"""WorkItem represents basic information about a WorkItem to be executed in
the cloud.
Fields:
configuration: Work item-specific configuration as an opaque blob.
id: Identifies this WorkItem.
initialReportIndex: The initial index to use when reporting the status of
the WorkItem.
jobId: Identifies the workflow job this WorkItem belongs to.
leaseExpireTime: Time when the lease on this Work will expire.
mapTask: Additional information for MapTask WorkItems.
packages: Any required packages that need to be fetched in order to
execute this WorkItem.
projectId: Identifies the cloud project this WorkItem belongs to.
reportStatusInterval: Recommended reporting interval.
seqMapTask: Additional information for SeqMapTask WorkItems.
shellTask: Additional information for ShellTask WorkItems.
sourceOperationTask: Additional information for source operation
WorkItems.
streamingComputationTask: Additional information for
StreamingComputationTask WorkItems.
streamingConfigTask: Additional information for StreamingConfigTask
WorkItems.
streamingSetupTask: Additional information for StreamingSetupTask
WorkItems.
"""
configuration = _messages.StringField(1)
id = _messages.IntegerField(2)
initialReportIndex = _messages.IntegerField(3)
jobId = _messages.StringField(4)
leaseExpireTime = _messages.StringField(5)
mapTask = _messages.MessageField('MapTask', 6)
packages = _messages.MessageField('Package', 7, repeated=True)
projectId = _messages.StringField(8)
reportStatusInterval = _messages.StringField(9)
seqMapTask = _messages.MessageField('SeqMapTask', 10)
shellTask = _messages.MessageField('ShellTask', 11)
sourceOperationTask = _messages.MessageField('SourceOperationRequest', 12)
streamingComputationTask = _messages.MessageField('StreamingComputationTask', 13)
streamingConfigTask = _messages.MessageField('StreamingConfigTask', 14)
streamingSetupTask = _messages.MessageField('StreamingSetupTask', 15)
class WorkItemServiceState(_messages.Message):
"""The Dataflow service's idea of the current state of a WorkItem being
processed by a worker.
Messages:
HarnessDataValue: Other data returned by the service, specific to the
particular worker harness.
Fields:
harnessData: Other data returned by the service, specific to the
particular worker harness.
leaseExpireTime: Time at which the current lease will expire.
metricShortId: The short ids that workers should use in subsequent metric
updates. Workers should strive to use short ids whenever possible, but
it is ok to request the short_id again if a worker lost track of it
(e.g. if the worker is recovering from a crash). NOTE: it is possible
that the response may have short ids for a subset of the metrics.
nextReportIndex: The index value to use for the next report sent by the
worker. Note: If the report call fails for whatever reason, the worker
should reuse this index for subsequent report attempts.
reportStatusInterval: New recommended reporting interval.
splitRequest: The progress point in the WorkItem where the Dataflow
service suggests that the worker truncate the task.
suggestedStopPoint: DEPRECATED in favor of split_request.
suggestedStopPosition: Obsolete, always empty.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class HarnessDataValue(_messages.Message):
"""Other data returned by the service, specific to the particular worker
harness.
Messages:
AdditionalProperty: An additional property for a HarnessDataValue
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a HarnessDataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
harnessData = _messages.MessageField('HarnessDataValue', 1)
leaseExpireTime = _messages.StringField(2)
metricShortId = _messages.MessageField('MetricShortId', 3, repeated=True)
nextReportIndex = _messages.IntegerField(4)
reportStatusInterval = _messages.StringField(5)
splitRequest = _messages.MessageField('ApproximateSplitRequest', 6)
suggestedStopPoint = _messages.MessageField('ApproximateProgress', 7)
suggestedStopPosition = _messages.MessageField('Position', 8)
class WorkItemStatus(_messages.Message):
"""Conveys a worker's progress through the work described by a WorkItem.
Fields:
completed: True if the WorkItem was completed (successfully or
unsuccessfully).
counterUpdates: Worker output counters for this WorkItem.
dynamicSourceSplit: See documentation of stop_position.
errors: Specifies errors which occurred during processing. If errors are
provided, and completed = true, then the WorkItem is considered to have
failed.
metricUpdates: DEPRECATED in favor of counter_updates.
progress: DEPRECATED in favor of reported_progress.
reportIndex: The report index. When a WorkItem is leased, the lease will
contain an initial report index. When a WorkItem's status is reported
to the system, the report should be sent with that report index, and the
response will contain the index the worker should use for the next
report. Reports received with unexpected index values will be rejected
by the service. In order to preserve idempotency, the worker should not
alter the contents of a report, even if the worker must submit the same
report multiple times before getting back a response. The worker should
not submit a subsequent report until the response for the previous
report had been received from the service.
reportedProgress: The worker's progress through this WorkItem.
requestedLeaseDuration: Amount of time the worker requests for its lease.
sourceFork: DEPRECATED in favor of dynamic_source_split.
sourceOperationResponse: If the work item represented a
SourceOperationRequest, and the work is completed, contains the result
of the operation.
    stopPosition: A worker may split an active map task in two parts,
      "primary" and "residual", continuing to process the primary part and
      returning the residual part into the pool of available work. This event
      is called a "dynamic split" and is critical to the dynamic work
      rebalancing feature. The two obtained sub-tasks are called "parts" of
      the split. The parts, if concatenated, must represent the same input as
      would be read by the current task if the split did not happen. The exact
      way in which the original task is decomposed into the two parts is
      specified either as a position demarcating them (stop_position), or
      explicitly as two DerivedSources, if this task consumes a user-defined
      source type (dynamic_source_split). The "current" task is adjusted as a
      result of the split: after a task with range [A, B) sends a
      stop_position update at C, its range is considered to be [A, C), e.g.:
      * Progress should be interpreted relative to the new range, e.g. "75%
        completed" means "75% of [A, C) completed".
      * The worker should interpret proposed_stop_position relative to the
        new range, e.g. "split at 68%" should be interpreted as "split at 68%
        of [A, C)".
      * If the worker chooses to split again using stop_position, only
        stop_positions in [A, C) will be accepted.
      * Etc.
      dynamic_source_split has similar semantics: e.g., if a task with source
      S splits using dynamic_source_split into {P, R} (where P and R must be
      together equivalent to S), then subsequent progress and
      proposed_stop_position should be interpreted relative to P, and in a
      potential subsequent dynamic_source_split into {P', R'}, P' and R' must
      be together equivalent to P, etc.
workItemId: Identifies the WorkItem.
"""
completed = _messages.BooleanField(1)
counterUpdates = _messages.MessageField('CounterUpdate', 2, repeated=True)
dynamicSourceSplit = _messages.MessageField('DynamicSourceSplit', 3)
errors = _messages.MessageField('Status', 4, repeated=True)
metricUpdates = _messages.MessageField('MetricUpdate', 5, repeated=True)
progress = _messages.MessageField('ApproximateProgress', 6)
reportIndex = _messages.IntegerField(7)
reportedProgress = _messages.MessageField('ApproximateReportedProgress', 8)
requestedLeaseDuration = _messages.StringField(9)
sourceFork = _messages.MessageField('SourceFork', 10)
sourceOperationResponse = _messages.MessageField('SourceOperationResponse', 11)
stopPosition = _messages.MessageField('Position', 12)
workItemId = _messages.StringField(13)
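
# Hedged sketch of the report-index handshake documented on
# WorkItemStatus.reportIndex and WorkItemServiceState.nextReportIndex: every
# report carries the current index, and the index only advances once the
# service has answered (failed calls reuse the same index). `send_report` is a
# hypothetical stand-in for the actual ReportWorkItemStatus call.
def _report_statuses(work_item, statuses, send_report):
  report_index = work_item.initialReportIndex
  for status in statuses:
    status.reportIndex = report_index
    service_state = send_report(status)           # retried with the same index
    report_index = service_state.nextReportIndex  # advance only on success
  return report_index
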
class WorkerHealthReport(_messages.Message):
"""WorkerHealthReport contains information about the health of a worker.
The VM should be identified by the labels attached to the WorkerMessage that
this health ping belongs to.
Messages:
PodsValueListEntry: A PodsValueListEntry object.
Fields:
    pods: The pods running on the worker. See:
      http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod
      This field is used by the worker to send the status of the individual
      containers running on each worker.
reportInterval: The interval at which the worker is sending health
reports. The default value of 0 should be interpreted as the field is
not being explicitly set by the worker.
vmIsHealthy: Whether the VM is healthy.
vmStartupTime: The time the VM was booted.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PodsValueListEntry(_messages.Message):
"""A PodsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a PodsValueListEntry
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PodsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
pods = _messages.MessageField('PodsValueListEntry', 1, repeated=True)
reportInterval = _messages.StringField(2)
vmIsHealthy = _messages.BooleanField(3)
vmStartupTime = _messages.StringField(4)
class WorkerHealthReportResponse(_messages.Message):
"""WorkerHealthReportResponse contains information returned to the worker in
response to a health ping.
Fields:
reportInterval: A positive value indicates the worker should change its
reporting interval to the specified value. The default value of zero
means no change in report rate is requested by the server.
"""
reportInterval = _messages.StringField(1)
class WorkerMessage(_messages.Message):
"""WorkerMessage provides information to the backend about a worker.
Messages:
LabelsValue: Labels are used to group WorkerMessages. For example, a
worker_message about a particular container might have the labels: {
"JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015\u2026"
"CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags
typically correspond to Label enum values. However, for ease of
development other strings can be used as tags. LABEL_UNSPECIFIED should
not be used here.
Fields:
labels: Labels are used to group WorkerMessages. For example, a
worker_message about a particular container might have the labels: {
"JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015\u2026"
"CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags
typically correspond to Label enum values. However, for ease of
development other strings can be used as tags. LABEL_UNSPECIFIED should
not be used here.
time: The timestamp of the worker_message.
workerHealthReport: The health of a worker.
workerMessageCode: A worker message code.
workerMetrics: Resource metrics reported by workers.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""Labels are used to group WorkerMessages. For example, a worker_message
about a particular container might have the labels: { "JOB_ID":
"2015-04-22", "WORKER_ID": "wordcount-vm-2015\u2026" "CONTAINER_TYPE":
"worker", "CONTAINER_ID": "ac1234def"} Label tags typically correspond
to Label enum values. However, for ease of development other strings can
be used as tags. LABEL_UNSPECIFIED should not be used here.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
labels = _messages.MessageField('LabelsValue', 1)
time = _messages.StringField(2)
workerHealthReport = _messages.MessageField('WorkerHealthReport', 3)
workerMessageCode = _messages.MessageField('WorkerMessageCode', 4)
workerMetrics = _messages.MessageField('ResourceUtilizationReport', 5)
class WorkerMessageCode(_messages.Message):
"""A message code is used to report status and error messages to the
service. The message codes are intended to be machine readable. The service
will take care of translating these into user understandable messages if
necessary. Example use cases: 1. Worker processes reporting successful
startup. 2. Worker processes reporting specific errors (e.g. package
staging failure).
Messages:
ParametersValue: Parameters contains specific information about the code.
This is a struct to allow parameters of different types. Examples: 1.
For a "HARNESS_STARTED" message parameters might provide the name of
the worker and additional data like timing information. 2. For a
"GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS
objects being downloaded and fields containing errors. In general
complex data structures should be avoided. If a worker needs to send a
specific and complicated data structure then please consider defining a
new proto and adding it to the data oneof in WorkerMessageResponse.
Conventions: Parameters should only be used for information that isn't
typically passed as a label. hostname and other worker identifiers
should almost always be passed as labels since they will be included on
most messages.
Fields:
code: The code is a string intended for consumption by a machine that
identifies the type of message being sent. Examples: 1.
"HARNESS_STARTED" might be used to indicate the worker harness has
started. 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error
downloading a GCS file as part of the boot process of one of the
worker containers. This is a string and not an enum to make it easy to
add new codes without waiting for an API change.
parameters: Parameters contains specific information about the code. This
is a struct to allow parameters of different types. Examples: 1. For a
"HARNESS_STARTED" message parameters might provide the name of the
worker and additional data like timing information. 2. For a
"GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS
objects being downloaded and fields containing errors. In general
complex data structures should be avoided. If a worker needs to send a
specific and complicated data structure then please consider defining a
new proto and adding it to the data oneof in WorkerMessageResponse.
Conventions: Parameters should only be used for information that isn't
typically passed as a label. hostname and other worker identifiers
should almost always be passed as labels since they will be included on
most messages.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParametersValue(_messages.Message):
"""Parameters contains specific information about the code. This is a
struct to allow parameters of different types. Examples: 1. For a
"HARNESS_STARTED" message parameters might provide the name of the
worker and additional data like timing information. 2. For a
"GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS
objects being downloaded and fields containing errors. In general complex
data structures should be avoided. If a worker needs to send a specific
and complicated data structure then please consider defining a new proto
and adding it to the data oneof in WorkerMessageResponse. Conventions:
Parameters should only be used for information that isn't typically passed
as a label. hostname and other worker identifiers should almost always be
passed as labels since they will be included on most messages.
Messages:
AdditionalProperty: An additional property for a ParametersValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ParametersValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.StringField(1)
parameters = _messages.MessageField('ParametersValue', 2)
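# Illustrative sketch only: a WorkerMessageCode reporting a hypothetical
# "HARNESS_STARTED" event. ParametersValue entries hold extra_types.JsonValue
# payloads; this assumes apitools' extra_types module, which the generated
# Dataflow client already relies on for the JsonValue message type.
def _example_worker_message_code():
  from apitools.base.py import extra_types
  params = WorkerMessageCode.ParametersValue(additionalProperties=[
      WorkerMessageCode.ParametersValue.AdditionalProperty(
          key='worker', value=extra_types.JsonValue(string_value='wordcount-vm-1')),
  ])
  return WorkerMessageCode(code='HARNESS_STARTED', parameters=params)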
class WorkerMessageResponse(_messages.Message):
"""A worker_message response allows the server to pass information to the
sender.
Fields:
workerHealthReportResponse: The service's response to a worker's health
report.
workerMetricsResponse: Service's response to reporting worker metrics
(currently empty).
"""
workerHealthReportResponse = _messages.MessageField('WorkerHealthReportResponse', 1)
workerMetricsResponse = _messages.MessageField('ResourceUtilizationReportResponse', 2)
class WorkerPool(_messages.Message):
"""Describes one particular pool of Cloud Dataflow workers to be
instantiated by the Cloud Dataflow service in order to perform the
computations required by a job. Note that a workflow job may use multiple
pools, in order to match the various computational requirements of the
various stages of the job.
Enums:
DefaultPackageSetValueValuesEnum: The default package set to install.
This allows the service to select a default set of packages which are
useful to worker harnesses written in a particular language.
IpConfigurationValueValuesEnum: Configuration for VM IPs.
TeardownPolicyValueValuesEnum: Sets the policy for determining when to
tear down the worker pool. Allowed values are: `TEARDOWN_ALWAYS`,
`TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means
workers are always torn down regardless of whether the job succeeds.
`TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds.
`TEARDOWN_NEVER` means the workers are never torn down. If the workers
are not torn down by the service, they will continue to run and use
Google Compute Engine VM resources in the user's project until they are
explicitly terminated by the user. Because of this, Google recommends
using the `TEARDOWN_ALWAYS` policy except for small, manually supervised
test jobs. If unknown or unspecified, the service will attempt to
choose a reasonable default.
Messages:
MetadataValue: Metadata to set on the Google Compute Engine VMs.
PoolArgsValue: Extra arguments for this worker pool.
Fields:
autoscalingSettings: Settings for autoscaling of this WorkerPool.
dataDisks: Data disks that are used by a VM in this workflow.
defaultPackageSet: The default package set to install. This allows the
service to select a default set of packages which are useful to worker
harnesses written in a particular language.
diskSizeGb: Size of root disk for VMs, in GB. If zero or unspecified, the
service will attempt to choose a reasonable default.
diskSourceImage: Fully qualified source image for disks.
diskType: Type of root disk for VMs. If empty or unspecified, the service
will attempt to choose a reasonable default.
ipConfiguration: Configuration for VM IPs.
kind: The kind of the worker pool; currently only `harness` and `shuffle`
are supported.
machineType: Machine type (e.g. "n1-standard-1"). If empty or
unspecified, the service will attempt to choose a reasonable default.
metadata: Metadata to set on the Google Compute Engine VMs.
network: Network to which VMs will be assigned. If empty or unspecified,
the service will use the network "default".
numThreadsPerWorker: The number of threads per worker harness. If empty or
unspecified, the service will choose a number of threads (according to
the number of cores on the selected machine type for batch, or 1 by
convention for streaming).
numWorkers: Number of Google Compute Engine workers in this pool needed to
execute the job. If zero or unspecified, the service will attempt to
choose a reasonable default.
onHostMaintenance: The action to take on host maintenance, as defined by
the Google Compute Engine API.
packages: Packages to be installed on workers.
poolArgs: Extra arguments for this worker pool.
subnetwork: Subnetwork to which VMs will be assigned, if desired.
Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
taskrunnerSettings: Settings passed through to Google Compute Engine
workers when using the standard Dataflow task runner. Users should
ignore this field.
teardownPolicy: Sets the policy for determining when to tear down the worker
pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
`TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down
regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means
workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the
workers are never torn down. If the workers are not torn down by the
service, they will continue to run and use Google Compute Engine VM
resources in the user's project until they are explicitly terminated by
the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS`
policy except for small, manually supervised test jobs. If unknown or
unspecified, the service will attempt to choose a reasonable default.
workerHarnessContainerImage: Required. Docker container image that
executes the Cloud Dataflow worker harness, residing in Google Container
Registry.
zone: Zone to run the worker pools in. If empty or unspecified, the
service will attempt to choose a reasonable default.
"""
class DefaultPackageSetValueValuesEnum(_messages.Enum):
"""The default package set to install. This allows the service to select
a default set of packages which are useful to worker harnesses written in
a particular language.
Values:
DEFAULT_PACKAGE_SET_UNKNOWN: The default set of packages to stage is
unknown, or unspecified.
DEFAULT_PACKAGE_SET_NONE: Indicates that no packages should be staged at
the worker unless explicitly specified by the job.
DEFAULT_PACKAGE_SET_JAVA: Stage packages typically useful to workers
written in Java.
DEFAULT_PACKAGE_SET_PYTHON: Stage packages typically useful to workers
written in Python.
"""
DEFAULT_PACKAGE_SET_UNKNOWN = 0
DEFAULT_PACKAGE_SET_NONE = 1
DEFAULT_PACKAGE_SET_JAVA = 2
DEFAULT_PACKAGE_SET_PYTHON = 3
class IpConfigurationValueValuesEnum(_messages.Enum):
"""Configuration for VM IPs.
Values:
WORKER_IP_UNSPECIFIED: The configuration is unknown, or unspecified.
WORKER_IP_PUBLIC: Workers should have public IP addresses.
WORKER_IP_PRIVATE: Workers should have private IP addresses.
"""
WORKER_IP_UNSPECIFIED = 0
WORKER_IP_PUBLIC = 1
WORKER_IP_PRIVATE = 2
class TeardownPolicyValueValuesEnum(_messages.Enum):
"""Sets the policy for determining when to turndown worker pool. Allowed
values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
`TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down
regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means
workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the
workers are never torn down. If the workers are not torn down by the
service, they will continue to run and use Google Compute Engine VM
resources in the user's project until they are explicitly terminated by
the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS`
policy except for small, manually supervised test jobs. If unknown or
unspecified, the service will attempt to choose a reasonable default.
Values:
TEARDOWN_POLICY_UNKNOWN: The teardown policy isn't specified, or is
unknown.
TEARDOWN_ALWAYS: Always tear down the resource.
TEARDOWN_ON_SUCCESS: Tear down the resource on success. This is useful
for debugging failures.
TEARDOWN_NEVER: Never tear down the resource. This is useful for
debugging and development.
"""
TEARDOWN_POLICY_UNKNOWN = 0
TEARDOWN_ALWAYS = 1
TEARDOWN_ON_SUCCESS = 2
TEARDOWN_NEVER = 3
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
"""Metadata to set on the Google Compute Engine VMs.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Additional properties of type MetadataValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class PoolArgsValue(_messages.Message):
"""Extra arguments for this worker pool.
Messages:
AdditionalProperty: An additional property for a PoolArgsValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PoolArgsValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
autoscalingSettings = _messages.MessageField('AutoscalingSettings', 1)
dataDisks = _messages.MessageField('Disk', 2, repeated=True)
defaultPackageSet = _messages.EnumField('DefaultPackageSetValueValuesEnum', 3)
diskSizeGb = _messages.IntegerField(4, variant=_messages.Variant.INT32)
diskSourceImage = _messages.StringField(5)
diskType = _messages.StringField(6)
ipConfiguration = _messages.EnumField('IpConfigurationValueValuesEnum', 7)
kind = _messages.StringField(8)
machineType = _messages.StringField(9)
metadata = _messages.MessageField('MetadataValue', 10)
network = _messages.StringField(11)
numThreadsPerWorker = _messages.IntegerField(12, variant=_messages.Variant.INT32)
numWorkers = _messages.IntegerField(13, variant=_messages.Variant.INT32)
onHostMaintenance = _messages.StringField(14)
packages = _messages.MessageField('Package', 15, repeated=True)
poolArgs = _messages.MessageField('PoolArgsValue', 16)
subnetwork = _messages.StringField(17)
taskrunnerSettings = _messages.MessageField('TaskRunnerSettings', 18)
teardownPolicy = _messages.EnumField('TeardownPolicyValueValuesEnum', 19)
workerHarnessContainerImage = _messages.StringField(20)
zone = _messages.StringField(21)
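# Illustrative sketch only: a minimal WorkerPool showing how the enum values
# documented above are referenced from Python. The field values are
# placeholders, not recommended settings.
def _example_worker_pool():
  return WorkerPool(
      kind='harness',
      numWorkers=3,
      machineType='n1-standard-1',
      teardownPolicy=WorkerPool.TeardownPolicyValueValuesEnum.TEARDOWN_ALWAYS,
      defaultPackageSet=WorkerPool.DefaultPackageSetValueValuesEnum.DEFAULT_PACKAGE_SET_PYTHON)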
class WorkerSettings(_messages.Message):
"""Provides data to pass through to the worker harness.
Fields:
baseUrl: The base URL for accessing Google Cloud APIs. When workers
access Google Cloud APIs, they logically do so via relative URLs. If
this field is specified, it supplies the base URL to use for resolving
these relative URLs. The normative algorithm used is defined by RFC
1808, "Relative Uniform Resource Locators". If not specified, the
default value is "http://www.googleapis.com/"
reportingEnabled: Whether to send work progress updates to the service.
servicePath: The Cloud Dataflow service path relative to the root URL, for
example, "dataflow/v1b3/projects".
shuffleServicePath: The Shuffle service path relative to the root URL, for
example, "shuffle/v1beta1".
tempStoragePrefix: The prefix of the resources the system should use for
temporary storage. The supported resource type is: Google Cloud
Storage: storage.googleapis.com/{bucket}/{object}
bucket.storage.googleapis.com/{object}
workerId: The ID of the worker running this pipeline.
"""
baseUrl = _messages.StringField(1)
reportingEnabled = _messages.BooleanField(2)
servicePath = _messages.StringField(3)
shuffleServicePath = _messages.StringField(4)
tempStoragePrefix = _messages.StringField(5)
workerId = _messages.StringField(6)
class WriteInstruction(_messages.Message):
"""An instruction that writes records. Takes one input, produces no outputs.
Fields:
input: The input.
sink: The sink to write to.
"""
input = _messages.MessageField('InstructionInput', 1)
sink = _messages.MessageField('Sink', 2)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'dataflow')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'dataflow')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'dataflow')
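# Illustrative sketch only: the mappings above rename the field and its enum
# values during JSON (de)serialization, so f__xgafv=_1 is emitted under the
# "$.xgafv" key with the value "1". This assumes apitools' encoding.MessageToJson
# helper, which lives in the same apitools.base.py.encoding module used above.
def _example_custom_json_mapping():
  msg = StandardQueryParameters(
      f__xgafv=StandardQueryParameters.FXgafvValueValuesEnum._1)
  return encoding.MessageToJson(msg)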
| apache-2.0 |
zhjunlang/kbengine | kbe/res/scripts/common/Lib/site-packages/setuptools/command/easy_install.py | 206 | 72706 | #!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
from glob import glob
from distutils import log, dir_util
import pkg_resources
from setuptools import Command, _dont_write_bytecode
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
sys_executable = os.environ.get('__VENV_LAUNCHER__',
os.path.normpath(sys.executable))
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
return struct.calcsize("P") == 8
def samefile(p1, p2):
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
if sys.version_info <= (3,):
def _to_ascii(s):
return s
def isascii(s):
try:
unicode(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=','S',"list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
if site.ENABLE_USER_SITE:
whereami = os.path.abspath(__file__)
self.user = whereami.startswith(site.USER_SITE)
else:
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
for filename in blockers:
if os.path.exists(filename) or os.path.islink(filename):
log.info("Deleting %s", filename)
if not self.dry_run:
if os.path.isdir(filename) and not os.path.islink(filename):
rmtree(filename)
else:
os.unlink(filename)
def finalize_options(self):
if self.version:
print('setuptools %s' % get_distribution('setuptools').version)
sys.exit()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
# fix the install_dir if "--user" was used
#XXX: duplicate of the code in the setup command
if self.user and site.ENABLE_USER_SITE:
self.create_home_path()
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
if os.name == 'posix':
self.select_scheme("unix_user")
else:
self.select_scheme(os.name + "_user")
self.expand_basedirs()
self.expand_dirs()
self._expand('install_dir','script_dir','build_directory','site_dirs')
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options('install_lib',
('install_dir','install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options('install_scripts',
('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = map(normalize_path, sys.path)
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d+" (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable: self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path = self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path+sys.path)
if self.find_links is not None:
if isinstance(self.find_links, basestring):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path+sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize','optimize'))
if not isinstance(self.optimize,int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2): raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data',])
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir,'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname()+'.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists: os.unlink(testfile)
open(testfile,'w').close()
os.unlink(testfile)
except (OSError,IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH','').split(os.pathsep)
if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
def cant_write_to_target(self):
template = """can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
"""
msg = template % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += """
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
"""
else:
msg += """
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
"""
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname()+".pth"
ok_file = pth_file+'.ok'
ok_exists = os.path.exists(ok_file)
try:
if ok_exists: os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(pth_file,'w')
except (OSError,IOError):
self.cant_write_to_target()
else:
try:
f.write("import os; f = open(%r, 'w'); f.write('OK'); f.close()\n" % (ok_file,))
f.close()
f=None
executable = sys.executable
if os.name=='nt':
dirname,basename = os.path.split(executable)
alt = os.path.join(dirname,'pythonw.exe')
if basename.lower()=='python.exe' and os.path.exists(alt):
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable,'-E','-c','pass'],0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/'+script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base,filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self,spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
def easy_install(self, spec, deps=False):
tmpdir = tempfile.mkdtemp(prefix="easy_install-")
download = None
if not self.editable: self.install_site_py()
try:
if not isinstance(spec,Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
download = self.package_index.download(spec, tmpdir)
return self.install_item(None, download, tmpdir, deps, True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable, not self.always_copy,
self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg+=" (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence==DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if file in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location==download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = requirement or distreq
requirement = Requirement(
distreq.project_name, distreq.specs, requirement.extras
)
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound:
e = sys.exc_info()[1]
raise DistutilsError(
"Could not find required distribution %s" % e.args
)
except VersionConflict:
e = sys.exc_info()[1]
raise DistutilsError(
"Installed distribution %s conflicts with requirement %s"
% e.args
)
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = "%r already exists in %s; build directory %s will not be kept"
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename)==setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents)==1:
dist_filename = os.path.join(setup_base,contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if not self.exclude_scripts:
for args in get_script_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
def get_template(filename):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
These templates use triple-quotes to escape variable
substitutions so the scripts get the 2to3 treatment when built
on Python 3. The templates cannot use triple-quotes naturally.
"""
raw_bytes = resource_string('setuptools', filename)  # load the named template from the setuptools package data
template_str = raw_bytes.decode('utf-8')
clean_template = template_str.replace('"""', '')
return clean_template
if is_script:
template_name = 'script template.py'
if dev_path:
template_name = template_name.replace('.py', ' (dev).py')
script_text = (get_script_header(script_text) +
get_template(template_name) % locals())
self.write_script(script_name, _to_ascii(script_text), 'b')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir,x) for x in blockers])
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target, 0x1FF-mask) # 0777
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" % os.path.abspath(dist_filename)
)
if len(setups)>1:
raise DistutilsError(
"Multiple setup scripts in %s" % os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path,metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(self.install_dir,os.path.basename(egg_path))
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(os.unlink,(destination,),"Removing "+destination)
uncache_zipdir(destination)
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f,m = shutil.move, "Moving"
else:
f,m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f,m = self.unpack_and_compile, "Extracting"
elif egg_path.startswith(tmpdir):
f,m = shutil.move, "Moving"
else:
f,m = shutil.copy2, "Copying"
self.execute(f, (egg_path, destination),
(m+" %s to %s") %
(os.path.basename(egg_path),os.path.dirname(destination)))
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata','name'),
version=cfg.get('metadata','version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg')
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf,'w')
f.write('Metadata-Version: 1.0\n')
for k,v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_','-').title(), v))
f.close()
script_dir = os.path.join(_egg_info,'scripts')
self.delete_blockers( # delete entry-point scripts to avoid duping
[os.path.join(script_dir,args[0]) for args in get_script_args(dist)]
)
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src,dst):
s = src.lower()
for old,new in prefixes:
if s.startswith(old):
src = new+src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old!='SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1])+'.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level','native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt')
if not os.path.exists(txt):
f = open(txt,'w')
f.write('\n'.join(locals()[name])+'\n')
f.close()
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += """
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
"""
if self.install_dir not in map(normalize_path,sys.path):
msg += """
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return """\nExtracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""" % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose>2:
v = 'v' * (self.verbose - 1)
args.insert(0,'-'+v)
elif self.verbose<2:
args.insert(0,'-q')
if self.dry_run:
args.insert(0,'-n')
log.info(
"Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit:
v = sys.exc_info()[1]
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives: continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key=='setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir,'setuptools.pth')
if os.path.islink(filename): os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location)+'\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src,dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0x16D) & 0xFED # 0555, 07755
chmod(f, mode)
def byte_compile(self, to_compile):
if _dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
def no_default_version_msg(self):
template = """bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again."""
return template % (self.install_dir, os.environ.get('PYTHONPATH',''))
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy,'rb')
current = f.read()
# we want str, not bytes
if sys.version_info >= (3,):
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy,'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0700)" % path)
os.makedirs(path, 0x1C0) # 0700
INSTALL_SCHEMES = dict(
posix = dict(
install_dir = '$base/lib/python$py_version_short/site-packages',
script_dir = '$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir = '$base/Lib/site-packages',
script_dir = '$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name,self.DEFAULT_SCHEME)
for attr,val in scheme.items():
if getattr(self,attr,None) is None:
setattr(self,attr,val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
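# Illustrative sketch with hypothetical values: _expand() feeds the '$'-style
# scheme templates above through distutils' subst_vars, so an explicit
# --prefix such as /opt/py resolves the posix install_dir roughly like this.
def _example_scheme_expansion():
    from distutils.util import subst_vars
    config_vars = {'base': '/opt/py', 'py_version_short': '3.4'}
    template = '$base/lib/python$py_version_short/site-packages'
    return subst_vars(template, config_vars)  # '/opt/py/lib/python3.4/site-packages'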
def get_site_dirs():
# return a list of 'site' dirs
sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
'').split(os.pathsep) if _f]
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")])
else:
sitedirs.extend(
[prefix, os.path.join(prefix, "lib", "site-packages")]
)
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs: sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
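# Usage sketch: get_site_dirs() returns the normalized candidate site-packages
# directories (PYTHONPATH entries, prefix-derived locations, and the user site
# when enabled); easy_install consults this list when deciding where .pth
# processing applies.
def _example_list_site_dirs():
    candidates = get_site_dirs()
    for site_dir in candidates:
        log.debug("candidate site dir: %s", site_dir)
    return candidates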
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth','setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname,name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
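# Usage sketch: expand_paths() yields (directory, contents) pairs for the given
# directories plus any extra directories referenced by third-party .pth files
# found in them, so callers can scan for conflicting "old-style" installs.
def _example_collect_pth_referenced_dirs(inputs):
    return [dirname for dirname, _files in expand_paths(inputs)]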
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a ConfigParser.RawConfigParser, or None
"""
f = open(dist_filename,'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended-12)
from setuptools.compat import StringIO, ConfigParser
import struct
tag, cfglen, bmlen = struct.unpack("<iii",f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended-(12+cfglen))
cfg = ConfigParser.RawConfigParser({'version':'','target_version':''})
try:
part = f.read(cfglen)
# part is in bytes, but we need to read up to the first null
# byte.
if sys.version_info >= (2,6):
null_byte = bytes([0])
else:
null_byte = chr(0)
config = part.split(null_byte, 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(StringIO(config))
except ConfigParser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
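# Usage sketch (hypothetical filename): reading the embedded metadata of a
# bdist_wininst installer. The returned object is a RawConfigParser, and these
# are the same 'metadata' keys install_exe() uses to name the generated egg.
def _example_read_wininst_metadata(path='example-1.0.win32.exe'):
    cfg = extract_wininst_cfg(path)
    if cfg is None:
        return None
    return cfg.get('metadata', 'name'), cfg.get('metadata', 'version')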
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts)==3 and parts[2]=='PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0,('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB','PLATLIB'):
contents = z.read(name)
if sys.version_info >= (3,):
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\','/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0],pth)), ''))
finally:
z.close()
prefixes = [(x.lower(),y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
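# Comment only (not part of the original module): the returned pairs map
# lower-cased path prefixes inside the .exe (e.g. 'purelib/', 'scripts/') to
# their destination inside the egg, reverse-sorted so that more specific
# prefixes are matched before shorter ones.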
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
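# Illustrative behaviour (comment only, not part of the original module);
# 'example-dist' is a placeholder project name:
#
#     parse_requirement_arg('example-dist>=1.0')   # -> a pkg_resources Requirement
#     parse_requirement_arg('not a requirement!')  # -> raises DistutilsError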
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename,'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir,path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
data = '\n'.join(map(self.make_relative,self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename,'wt')
f.write(data)
f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
def add(self, dist):
"""Add `dist` to the distribution map"""
if (dist.location not in self.paths and (
dist.location not in self.sitedirs or
dist.location == os.getcwd() # account for '.' being in PYTHONPATH
)):
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self,path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep=='/' and '/' or os.sep
while len(npath)>=baselen:
if npath==self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
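# Illustrative usage of PthDistributions (comment only, not part of the
# original module); the path is a placeholder and `some_dist` stands for a
# pkg_resources Distribution:
#
#     pth = PthDistributions('/path/to/site-packages/easy-install.pth')
#     pth.add(some_dist)   # appends its location and marks the file dirty
#     pth.save()           # rewrites the .pth file only if something changed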
def get_script_header(script_text, executable=sys_executable, wininst=False):
"""Create a #! line, getting options (if any) from script_text"""
from distutils.command.build_scripts import first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
if not isinstance(first_line_re.pattern, str):
first_line_re = re.compile(first_line_re.pattern.decode())
first = (script_text+'\n').splitlines()[0]
match = first_line_re.match(first)
options = ''
if match:
options = match.group(1) or ''
if options: options = ' '+options
if wininst:
executable = "python.exe"
else:
executable = nt_quote_arg(executable)
hdr = "#!%(executable)s%(options)s\n" % locals()
if not isascii(hdr):
# Non-ascii path to sys.executable, use -x to prevent warnings
if options:
if options.strip().startswith('-'):
options = ' -x'+options.strip()[1:]
# else: punt, we can't do it, let the warning happen anyway
else:
options = ' -x'
executable = fix_jython_executable(executable, options)
hdr = "#!%(executable)s%(options)s\n" % locals()
return hdr
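# Illustrative behaviour (comment only, not part of the original module):
# with wininst=True the interpreter is forced to "python.exe", so
# get_script_header('', wininst=True) yields "#!python.exe\n"; otherwise the
# header points at a (quoted, possibly Jython-adjusted) sys_executable.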
def auto_chmod(func, arg, exc):
if func is os.remove and os.name=='nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
reraise(et, (ev[0], ev[1] + (" %s %s" % (func,arg))))
def uncache_zipdir(path):
"""Ensure that the importer caches dont have stale info for `path`"""
from zipimport import _zip_directory_cache as zdc
_uncache(path, zdc)
_uncache(path, sys.path_importer_cache)
def _uncache(path, cache):
if path in cache:
del cache[path]
else:
path = normalize_path(path)
for p in cache:
if normalize_path(p)==path:
del cache[p]
return
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
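# Illustrative behaviour (comment only, not part of the original module):
#
#     is_python("x = 1")          # -> True  (compiles cleanly)
#     is_python("if then else")   # -> False (the SyntaxError is swallowed)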
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
fp = open(executable)
magic = fp.read(2)
fp.close()
except (OSError,IOError): return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
result = []
needquote = False
nb = 0
needquote = (" " in arg) or ("\t" in arg)
if needquote:
result.append('"')
for c in arg:
if c == '\\':
nb += 1
elif c == '"':
# double preceding backslashes, then add a \"
result.append('\\' * (nb*2) + '\\"')
nb = 0
else:
if nb:
result.append('\\' * nb)
nb = 0
result.append(c)
if nb:
result.append('\\' * nb)
if needquote:
result.append('\\' * nb) # double the trailing backslashes
result.append('"')
return ''.join(result)
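# Illustrative behaviour (comment only, not part of the original module):
#
#     nt_quote_arg("python")            # -> 'python' (no quoting needed)
#     nt_quote_arg("C:\\Program Files") # -> '"C:\\Program Files"' (space forces quotes)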
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
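# Comment only (not part of the original module): a name like "run.py" is
# accepted on extension alone, syntactically valid source is accepted for any
# filename, and otherwise a "#!...python..." first line decides the outcome.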
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args): pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error:
e = sys.exc_info()[1]
log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
if sys.platform.startswith('java') and is_sh(executable):
        # The workaround for Jython is not needed on Linux systems.
import java
if java.lang.System.getProperty("os.name") == "Linux":
return executable
        # Work around Jython's sys.executable being a .sh (an invalid
# shebang line interpreter)
if options:
# Can't apply the workaround, leave it broken
log.warn(
"WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
else:
return '/usr/bin/env %s' % executable
return executable
class ScriptWriter(object):
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent("""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""").lstrip()
@classmethod
def get_script_args(cls, dist, executable=sys_executable, wininst=False):
"""
Yield write_script() argument tuples for a distribution's entrypoints
"""
gen_class = cls.get_writer(wininst)
spec = str(dist.as_requirement())
header = get_script_header("", executable, wininst)
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
script_text = gen_class.template % locals()
for res in gen_class._get_script_args(type_, name, header,
script_text):
yield res
@classmethod
def get_writer(cls, force_windows):
if force_windows or sys.platform=='win32':
return WindowsScriptWriter.get_writer()
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header+script_text)
class WindowsScriptWriter(ScriptWriter):
@classmethod
def get_writer(cls):
"""
Get a script writer suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
warnings.warn("%s not listed in PATHEXT; scripts will not be "
"recognized as executables." % ext, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name+x for x in old]
yield name+ext, header+script_text, 't', blockers
@staticmethod
def _adjust_header(type_, orig_header):
"""
        Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
clean_header = new_header[2:-1].strip('"')
if sys.platform == 'win32' and not os.path.exists(clean_header):
# the adjusted version doesn't exist, so return the original
return orig_header
return new_header
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_=='gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py','.pyc','.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name+x for x in old]
yield (name+ext, hdr+script_text, 't', blockers)
yield (
name+'.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if platform.machine().lower()=='arm':
launcher_fn = launcher_fn.replace(".", "-arm.")
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if sys.version_info[0] < 3:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
"""Recursively delete a directory tree.
This code is taken from the Python 2.4 version of 'shutil', because
the 2.3 version doesn't really work right.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def current_umask():
tmp = os.umask(0x12) # 022
os.umask(tmp)
return tmp
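# Comment only (not part of the original module): os.umask() both sets a new
# mask and returns the previous one, so the probe above temporarily installs
# 0x12 (octal 022), captures the real umask, and immediately restores it.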
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
import distutils.core
USAGE = """\
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
"""
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
def with_ei_usage(f):
old_gen_usage = distutils.core.gen_usage
try:
distutils.core.gen_usage = gen_usage
return f()
finally:
distutils.core.gen_usage = old_gen_usage
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self,*args,**kw):
with_ei_usage(lambda: Distribution._show_help(self,*args,**kw))
if argv is None:
argv = sys.argv[1:]
with_ei_usage(lambda:
setup(
script_args = ['-q','easy_install', '-v']+argv,
script_name = sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands, **kw
)
)
| lgpl-3.0 |
meteokid/python-rpn | share/examples/fst_to_lalo.py | 1 | 4082 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Barbara Casati <[email protected]>
# Author: Stephane Chamberland <[email protected]>
"""
Interpolate RPNSTD rec to latlon points
"""
import sys
import optparse
import numpy as np
from scipy import interpolate
import rpnpy.librmn.all as rmn
if __name__ == "__main__":
inttypelist = {
'n' : rmn.EZ_INTERP_NEAREST,
'l' : rmn.EZ_INTERP_LINEAR,
'c' : rmn.EZ_INTERP_CUBIC
}
# Command line arguments
desc="Interpolate RPNSTD rec to latlon points"
usage = """
%prog -f FSTFILE -n VARNAME -o OUTFILE [-l LOLAFILE] [-t INTTYPE]
LOLAFILE format, one destination point per line:
lon1 lat1
lon2 lat2
...
OUTPUT format
lon1, lat1, value1, extrap
lon2, lat2, value2, extrap
"""
parser = optparse.OptionParser(usage=usage,description=desc)
parser.add_option("-f","--fstfile",dest="fstfile",default="",
help="Name of RPN STD file containing records")
parser.add_option("-n","--varname",dest="varname",default="",
help="Varname of the record to interpolate")
parser.add_option("-l","--lolafile",dest="lolafile",default="/cnfs/dev/mrb/armn/armnbca/MesoVIC/VERA/VERA_8km_coordinates_lam_phi.txt",
help="Name of text file with destination coordinates, one 'lon lat' per line")
parser.add_option("-t","--inttype",dest="inttype",default="linear",
help="Interpolation type: nearest, linear or cubic")
parser.add_option("-o","--outfile",dest="outfile",default="",
help="Output file name")
(options,args) = parser.parse_args()
if not (options.varname and options.fstfile and options.outfile and options.lolafile and options.inttype):
sys.stderr.write('Error: You need to specify a varname, an fst filename, an outfile name and a lolafile name.\n')
parser.print_help()
sys.exit(1)
inttype = options.inttype[0].lower()
if not (inttype in inttypelist.keys()):
sys.stderr.write('Error: INTTYPE should be one of: nearest, linear or cubic.\n')
parser.print_help()
sys.exit(1)
# Open and Read RPN STD file
try:
rmn.fstopt(rmn.FSTOP_MSGLVL, rmn.FSTOPI_MSG_CATAST)
funit = rmn.fstopenall(options.fstfile,rmn.FST_RO)
k = rmn.fstinf(funit,nomvar=options.varname)['key']
data = rmn.fstluk(k)['d']
meta = rmn.fstprm(k)
except:
raise rmn.RMNError('Problem opening/reading var=%s in File=%s' % (options.varname,options.fstfile))
# Define input record grid
try:
meta['iunit'] = funit
grid = rmn.ezqkdef(meta)
except:
raise rmn.RMNError('Problem defining input grid for var=%s in File=%s' % (options.varname,options.fstfile))
# Read lat lon file
try:
(lon,lat) = np.loadtxt(options.lolafile, dtype=np.float32, unpack=True)
## lat = np.asfortranarray(lat, dtype=np.float32)
## lon = np.asfortranarray(lon, dtype=np.float32)
except:
raise IOError('Problem reading the lola file: %s' % (options.lolafile))
# Interpolate input data to lat lon and print
rmn.ezsetopt(rmn.EZ_OPT_INTERP_DEGREE,inttypelist[inttype])
#rmn.ezsetopt(rmn.EZ_OPT_EXTRAP_DEGREE,rmn.EZ_EXTRAP_MAX)
(ni,nj) = data.shape
outfile = open(options.outfile, 'w')
for n in range(lat.size):
(lat2,lon2) = (np.asarray([lat[n]]),np.asarray([lon[n]]))
lldata2 = rmn.gdllsval(grid, lat2, lon2, data)
xypos2 = rmn.gdxyfll(grid, lat2, lon2)
extrap = ''
if (xypos2['x'][0] < 1. or xypos2['x'][0] > ni or
xypos2['y'][0] < 1. or xypos2['y'][0] > nj):
extrap='extrap'
outfile.write("%9.5f, %9.5f, %9.5f, %s\n" %
(lon[n], lat[n], lldata2[0], extrap))
del lldata2, lat2, lon2, xypos2
outfile.close()
# Close the RPN STD file
try:
rmn.fstcloseall(funit)
except:
pass
| lgpl-2.1 |
tlangerak/Multi-Agent-Systems | build/lib.win-amd64-2.7/tlslite/utils/cipherfactory.py | 357 | 3177 | """Factory functions for symmetric cryptography."""
import os
import Python_AES
import Python_RC4
import cryptomath
tripleDESPresent = False
if cryptomath.m2cryptoLoaded:
import OpenSSL_AES
import OpenSSL_RC4
import OpenSSL_TripleDES
tripleDESPresent = True
if cryptomath.cryptlibpyLoaded:
import Cryptlib_AES
import Cryptlib_RC4
import Cryptlib_TripleDES
tripleDESPresent = True
if cryptomath.pycryptoLoaded:
import PyCrypto_AES
import PyCrypto_RC4
import PyCrypto_TripleDES
tripleDESPresent = True
# **************************************************************************
# Factory Functions for AES
# **************************************************************************
def createAES(key, IV, implList=None):
"""Create a new AES object.
@type key: str
@param key: A 16, 24, or 32 byte string.
@type IV: str
@param IV: A 16 byte string
@rtype: L{tlslite.utils.AES}
@return: An AES object.
"""
if implList == None:
implList = ["cryptlib", "openssl", "pycrypto", "python"]
for impl in implList:
if impl == "cryptlib" and cryptomath.cryptlibpyLoaded:
return Cryptlib_AES.new(key, 2, IV)
elif impl == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_AES.new(key, 2, IV)
elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_AES.new(key, 2, IV)
elif impl == "python":
return Python_AES.new(key, 2, IV)
raise NotImplementedError()
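# Illustrative usage (comment only, not part of the original module); the key
# and IV are dummy 16-byte values and the plaintext length must be a multiple
# of the AES block size:
#
#     cipher = createAES("0" * 16, "1" * 16)
#     ciphertext = cipher.encrypt("A" * 32)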
def createRC4(key, IV, implList=None):
"""Create a new RC4 object.
@type key: str
@param key: A 16 to 32 byte string.
@type IV: object
@param IV: Ignored, whatever it is.
@rtype: L{tlslite.utils.RC4}
@return: An RC4 object.
"""
if implList == None:
implList = ["cryptlib", "openssl", "pycrypto", "python"]
if len(IV) != 0:
raise AssertionError()
for impl in implList:
if impl == "cryptlib" and cryptomath.cryptlibpyLoaded:
return Cryptlib_RC4.new(key)
elif impl == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RC4.new(key)
elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RC4.new(key)
elif impl == "python":
return Python_RC4.new(key)
raise NotImplementedError()
#Create a new TripleDES instance
def createTripleDES(key, IV, implList=None):
"""Create a new 3DES object.
@type key: str
@param key: A 24 byte string.
@type IV: str
@param IV: An 8 byte string
@rtype: L{tlslite.utils.TripleDES}
@return: A 3DES object.
"""
if implList == None:
implList = ["cryptlib", "openssl", "pycrypto"]
for impl in implList:
if impl == "cryptlib" and cryptomath.cryptlibpyLoaded:
return Cryptlib_TripleDES.new(key, 2, IV)
elif impl == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_TripleDES.new(key, 2, IV)
elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_TripleDES.new(key, 2, IV)
raise NotImplementedError() | lgpl-2.1 |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/configSectionNameDialog.py | 150 | 3720 | """
Dialog that allows the user to specify a new config file section name.
Used to get new highlight theme and keybinding set names.
"""
from Tkinter import *
import tkMessageBox
class GetCfgSectionNameDialog(Toplevel):
def __init__(self,parent,title,message,usedNames):
"""
message - string, informational message to display
usedNames - list, list of names already in use for validity check
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.resizable(height=FALSE,width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
self.message=message
self.usedNames=usedNames
self.result=''
self.CreateWidgets()
self.withdraw() #hide while setting geometry
self.update_idletasks()
#needs to be done here so that the winfo_reqwidth is valid
self.messageInfo.config(width=self.frameMain.winfo_reqwidth())
self.geometry("+%d+%d" %
((parent.winfo_rootx()+((parent.winfo_width()/2)
-(self.winfo_reqwidth()/2)),
parent.winfo_rooty()+((parent.winfo_height()/2)
-(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
self.deiconify() #geometry set, unhide
self.wait_window()
def CreateWidgets(self):
self.name=StringVar(self)
self.fontSize=StringVar(self)
self.frameMain = Frame(self,borderwidth=2,relief=SUNKEN)
self.frameMain.pack(side=TOP,expand=TRUE,fill=BOTH)
self.messageInfo=Message(self.frameMain,anchor=W,justify=LEFT,padx=5,pady=5,
text=self.message)#,aspect=200)
entryName=Entry(self.frameMain,textvariable=self.name,width=30)
entryName.focus_set()
self.messageInfo.pack(padx=5,pady=5)#,expand=TRUE,fill=BOTH)
entryName.pack(padx=5,pady=5)
frameButtons=Frame(self)
frameButtons.pack(side=BOTTOM,fill=X)
self.buttonOk = Button(frameButtons,text='Ok',
width=8,command=self.Ok)
self.buttonOk.grid(row=0,column=0,padx=5,pady=5)
self.buttonCancel = Button(frameButtons,text='Cancel',
width=8,command=self.Cancel)
self.buttonCancel.grid(row=0,column=1,padx=5,pady=5)
def NameOk(self):
#simple validity check for a sensible
#ConfigParser file section name
nameOk=1
name=self.name.get()
        name = name.strip()
if not name: #no name specified
tkMessageBox.showerror(title='Name Error',
message='No name specified.', parent=self)
nameOk=0
elif len(name)>30: #name too long
tkMessageBox.showerror(title='Name Error',
message='Name too long. It should be no more than '+
'30 characters.', parent=self)
nameOk=0
elif name in self.usedNames:
tkMessageBox.showerror(title='Name Error',
message='This name is already in use.', parent=self)
nameOk=0
return nameOk
def Ok(self, event=None):
if self.NameOk():
self.result=self.name.get().strip()
self.destroy()
def Cancel(self, event=None):
self.result=''
self.destroy()
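# Comment only (not part of the original module): the dialog is modal; the
# constructor blocks in wait_window() until Ok or Cancel, after which the
# validated name is available as dlg.result ('' when cancelled).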
if __name__ == '__main__':
#test the dialog
root=Tk()
def run():
keySeq=''
        dlg=GetCfgSectionNameDialog(root,'Get Name',
                'The information here should need to be word wrapped. Test.',[])
print dlg.result
Button(root,text='Dialog',command=run).pack()
root.mainloop()
| gpl-2.0 |
osiell/server-tools | base_user_reset_access/tests/test_base_user_reset_access.py | 21 | 1728 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of base_user_reset_access,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# base_user_reset_access is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# base_user_reset_access is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with base_user_reset_access.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestResetUserAccessRight(common.TransactionCase):
def setUp(self):
super(TestResetUserAccessRight, self).setUp()
self.user_obj = self.env['res.users']
def test_reset_demo_user_access_right(self):
# I get the demo user
demo_user = self.env.ref('base.user_demo')
demo_user.groups_id = [(4, self.ref('base.group_no_one'))]
demo_user.reset_access_right()
default_groups_ids = self.user_obj._get_group()
# I check if access right on this user are reset
self.assertEquals(set(demo_user.groups_id.ids),
set(default_groups_ids))
| agpl-3.0 |
Shnatsel/cjdns | node_build/dependencies/libuv/build/gyp/test/win/gyptest-link-generate-manifest.py | 238 | 4708 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure we generate a manifest file when linking binaries, including
handling AdditionalManifestFiles.
"""
import TestGyp
import sys
if sys.platform == 'win32':
import pywintypes
import win32api
import winerror
RT_MANIFEST = 24
class LoadLibrary(object):
"""Context manager for loading and releasing binaries in Windows.
Yields the handle of the binary loaded."""
def __init__(self, path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = win32api.LoadLibrary(self._path)
return self._handle
def __exit__(self, type, value, traceback):
win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
"""Reads manifest from |path| and returns it as a string.
    Returns None if there is no such manifest."""
with LoadLibrary(path) as handle:
try:
return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
except pywintypes.error as error:
if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
return None
else:
raise
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('generate-manifest.gyp', chdir=CHDIR)
test.build('generate-manifest.gyp', test.ALL, chdir=CHDIR)
# Make sure that generation of .generated.manifest does not cause a relink.
test.run_gyp('generate-manifest.gyp', chdir=CHDIR)
test.up_to_date('generate-manifest.gyp', test.ALL, chdir=CHDIR)
def test_manifest(filename, generate_manifest, embedded_manifest,
extra_manifest):
exe_file = test.built_file_path(filename, chdir=CHDIR)
if not generate_manifest:
test.must_not_exist(exe_file + '.manifest')
manifest = extract_manifest(exe_file, 1)
test.fail_test(manifest)
return
if embedded_manifest:
manifest = extract_manifest(exe_file, 1)
test.fail_test(not manifest)
else:
test.must_exist(exe_file + '.manifest')
manifest = test.read(exe_file + '.manifest')
test.fail_test(not manifest)
test.fail_test(extract_manifest(exe_file, 1))
if generate_manifest:
test.must_contain_any_line(manifest, 'requestedExecutionLevel')
if extra_manifest:
test.must_contain_any_line(manifest,
'35138b9a-5d96-4fbd-8e2d-a2440225f93a')
test.must_contain_any_line(manifest,
'e2011457-1546-43c5-a5fe-008deee3d3f0')
test_manifest('test_generate_manifest_true.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=False)
test_manifest('test_generate_manifest_false.exe',
generate_manifest=False,
embedded_manifest=False,
extra_manifest=False)
test_manifest('test_generate_manifest_default.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=False)
test_manifest('test_generate_manifest_true_as_embedded.exe',
generate_manifest=True,
embedded_manifest=True,
extra_manifest=False)
test_manifest('test_generate_manifest_false_as_embedded.exe',
generate_manifest=False,
embedded_manifest=True,
extra_manifest=False)
test_manifest('test_generate_manifest_default_as_embedded.exe',
generate_manifest=True,
embedded_manifest=True,
extra_manifest=False)
test_manifest('test_generate_manifest_true_with_extra_manifest.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_false_with_extra_manifest.exe',
generate_manifest=False,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_true_with_extra_manifest_list.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_false_with_extra_manifest_list.exe',
generate_manifest=False,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_default_embed_default.exe',
generate_manifest=True,
embedded_manifest=True,
extra_manifest=False)
test.pass_test()
| gpl-3.0 |
cedadev/cloudhands-web | cloudhands/web/main.py | 1 | 41523 | #!/usr/bin/env python3
# encoding: UTF-8
import argparse
import datetime
import functools
import logging
import operator
import os.path
import platform
import re
import sqlite3
import sys
import uuid
import bcrypt
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator
from pyramid.exceptions import Forbidden
from pyramid.exceptions import NotFound
from pyramid.httpexceptions import HTTPBadRequest
from pyramid.httpexceptions import HTTPClientError
from pyramid.httpexceptions import HTTPCreated
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPInternalServerError
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.renderers import JSON
from pyramid.security import authenticated_userid
from pyramid.security import forget
from pyramid.security import remember
from pyramid_authstack import AuthenticationStackPolicy
from pyramid_macauth import MACAuthenticationPolicy
from sqlalchemy import desc
from waitress import serve
from cloudhands.common.connectors import initialise
from cloudhands.common.connectors import Registry
from cloudhands.common.discovery import settings
import cloudhands.common.factories
from cloudhands.common.pipes import SimplePipeQueue
from cloudhands.common.schema import Appliance
from cloudhands.common.schema import BcryptedPassword
from cloudhands.common.schema import CatalogueChoice
from cloudhands.common.schema import CatalogueItem
from cloudhands.common.schema import EmailAddress
from cloudhands.common.schema import Host
from cloudhands.common.schema import Label
from cloudhands.common.schema import Membership
from cloudhands.common.schema import Organisation
from cloudhands.common.schema import OSImage
from cloudhands.common.schema import PosixUId
from cloudhands.common.schema import PosixUIdNumber
from cloudhands.common.schema import PosixGId
from cloudhands.common.schema import Provider
from cloudhands.common.schema import PublicKey
from cloudhands.common.schema import Registration
from cloudhands.common.schema import Resource
from cloudhands.common.schema import Serializable
from cloudhands.common.schema import Subscription
from cloudhands.common.schema import State
from cloudhands.common.schema import Touch
from cloudhands.common.schema import User
from cloudhands.common.states import ApplianceState
from cloudhands.common.states import HostState
from cloudhands.common.states import MembershipState
from cloudhands.common.states import RegistrationState
import cloudhands.web
from cloudhands.identity.ldap_account import change_password
from cloudhands.identity.ldap_account import next_uidnumber
from cloudhands.identity.membership import handle_from_email
from cloudhands.identity.membership import Acceptance
from cloudhands.identity.membership import Invitation
from cloudhands.identity.registration import NewAccount
from cloudhands.identity.registration import NewPassword
from cloudhands.web.catalogue import CatalogueItemView
from cloudhands.web.indexer import people
from cloudhands.web import __version__
from cloudhands.web.model import BcryptedPasswordView
from cloudhands.web.model import HostView
from cloudhands.web.model import LabelView
from cloudhands.web.model import MembershipView
from cloudhands.web.model import Page
from cloudhands.web.model import PageInfo
from cloudhands.web.model import PeoplePage
from cloudhands.web.model import PublicKeyView
from cloudhands.web.model import RegistrationView
from cloudhands.web.model import StateView
DFLT_PORT = 8080
DFLT_DB = ":memory:"
DFLT_IX = "cloudhands.wsh"
CRED_TABLE = {}
def cfg_paths(request, cfg=None):
cfg = cfg or {
"paths.assets": dict(
css = "cloudhands.web:static/css",
html = "cloudhands.web:static/html",
img = "cloudhands.web:static/img",
js = "cloudhands.web:static/js")
}
return {p: os.path.dirname(request.static_url(
'/'.join((cfg["paths.assets"][p], f))))
for p, f in (
("css", "any.css"), ("js", "any.js"), ("img", "any.png"))}
def registered_connection(request):
r = Registry()
return r.connect(*next(iter(r.items)))
def authenticate_user(request, refuse:Exception=None):
userId = authenticated_userid(request)
if refuse and userId is None:
raise refuse("Authentication failure")
con = registered_connection(request)
# Persona's user ids are email addresses, whereas Pyramid auth uses
# user names. We want to test for either.
user = (con.session.query(User).filter(User.handle == userId).first() or
con.session.query(User).join(Touch).join(
EmailAddress).filter(EmailAddress.value == userId).first())
if refuse and not user:
nf = refuse("User not found for {}".format(userId))
nf.userId = userId
raise nf
return user
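# Comment only (not part of the original module): callers that require a
# logged-in user pass an exception class, e.g.
# authenticate_user(request, Forbidden), so anonymous or unknown users raise
# immediately; without `refuse` the function simply returns None in that case.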
def create_membership_resources(session, m, rTyp, vals):
provider = session.query(Provider).first() # FIXME
latest = m.changes[-1]
for v in vals:
resource = rTyp(value=v, provider=provider)
now = datetime.datetime.utcnow()
act = Touch(artifact=m, actor=latest.actor, state=latest.state, at=now)
m.changes.append(act)
resource.touch = act
try:
session.add(resource)
session.commit()
except Exception as e:
session.rollback()
finally:
yield session.query(rTyp).filter(
rTyp.value == v, rTyp.provider == provider).first()
def datetime_adapter(obj, request):
return str(obj)
def regex_adapter(obj, request):
return obj.pattern
def record_adapter(obj, request):
rv = obj.as_dict()
try:
del rv["id"]
except KeyError:
pass
return rv
def touch_adapter(obj, request):
return {
"at": obj.at,
"state": {
"fsm": obj.state.fsm,
"name": obj.state.name
}
}
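# Comment only (not part of the original module): with the adapters above, the
# "hateoas" JSON renderer serialises a Touch as
#     {"at": "<timestamp>", "state": {"fsm": "...", "name": "..."}}
# and any other Serializable via its as_dict() output minus the "id" key.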
class LoginForbidden(Forbidden): pass
class RegistrationForbidden(Forbidden): pass
def top_read(request):
log = logging.getLogger("cloudhands.web.top_read")
con = registered_connection(request)
user = authenticate_user(request)
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
page.layout.info.push(PageInfo(refresh=30))
if user:
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
reg = con.session.query(Registration).join(Touch).join(User).filter(
User.uuid == user.uuid).first()
if reg:
page.layout.nav.push(reg)
else:
mships = []
for org in sorted(
{i.organisation for i in mships}, key=operator.attrgetter("name")
):
page.layout.nav.push(org)
for act in con.session.query(Touch).order_by(desc(Touch.at)).limit(5):
page.layout.items.push(act)
return dict(page.termination())
def appliance_read(request):
log = logging.getLogger("cloudhands.web.appliance_read")
con = registered_connection(request)
user = authenticate_user(request)
appUuid = request.matchdict["app_uuid"]
app = con.session.query(Appliance).filter(
Appliance.uuid == appUuid).first()
if not app:
raise NotFound("Appliance {} not found".format(appUuid))
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
page.layout.info.push(PageInfo(
title="Configure appliance",
url=request.route_url("appliance", app_uuid=appUuid)))
if user is not None:
user = con.session.merge(user)
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
for o in sorted(
{i.organisation for i in mships}, key=operator.attrgetter("name")
):
page.layout.nav.push(o)
else:
page.layout.nav.push(app.organisation)
resources = [r for c in app.changes for r in c.resources]
for i in resources:
page.layout.items.push(i)
if not any(i for i in resources if isinstance(i, Label)):
label = Label()
label.uuid = appUuid
page.layout.items.push(label)
# option for public IP address
return dict(page.termination())
def appliance_modify(request):
log = logging.getLogger("cloudhands.web.appliance_modify")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request, Forbidden))
appUuid = request.matchdict["app_uuid"]
app = con.session.query(Appliance).filter(
Appliance.uuid == appUuid).first()
if not app:
raise NotFound("Appliance {} not found".format(appUuid))
now = datetime.datetime.utcnow()
data = StateView(request.POST)
if data.invalid:
data = LabelView(request.POST)
if data.invalid:
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
else:
pre_provision = con.session.query(ApplianceState).filter(
ApplianceState.name == "pre_provision").one()
act = Touch(artifact=app, actor=user, state=pre_provision, at=now)
label = Label(
name=data["name"], description=data["description"],
touch=act)
con.session.add(label)
con.session.commit()
else:
state = con.session.query(State).filter(
State.fsm == data["fsm"]).filter(
State.name == data["name"]).first()
if state is None:
raise HTTPBadRequest(
"No such state {fsm} {name}".format(**data))
else:
act = Touch(artifact=app, actor=user, state=state, at=now)
con.session.add(act)
con.session.commit()
raise HTTPFound(
location=request.route_url(
"organisation", org_name=app.organisation.name))
def host_update(request):
log = logging.getLogger("cloudhands.web.host_update")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request))
hUuid = request.matchdict["host_uuid"]
host = con.session.query(Host).filter(
Host.uuid == hUuid).first()
if not host:
raise NotFound("Host {} not found".format(hUuid))
try:
oN = host.organisation.name
except Exception as e:
log.debug(e)
raise NotFound("Organisation not found for host {}".format(hUuid))
data = StateView(request.POST)
try:
badField = data.invalid[0].name
log.debug(request.POST)
log.debug(data)
raise HTTPBadRequest(
"Bad value in '{}' field".format(badField))
except (IndexError, AttributeError):
if data["fsm"] != "host":
raise HTTPBadRequest(
"Bad FSM value: {}".format(data["fsm"]))
state = con.session.query(HostState).filter(
HostState.name==data["name"]).first()
if not state:
raise NotFound("No such state '{}'".format(data["name"]))
now = datetime.datetime.utcnow()
act = Touch(artifact=host, actor=user, state=state, at=now)
host.changes.append(act)
try:
con.session.commit()
except Exception as e:
log.debug(e)
con.session.rollback()
raise HTTPFound(
location=request.route_url("organisation", org_name=oN))
def login_read(request):
log = logging.getLogger("cloudhands.web.login_read")
username = dict(request.GET).get("username", "")
page = Page(
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
if getattr(request, "exception", None) is not None:
page.layout.info.push(request.exception)
user = User(
uuid=uuid.uuid4().hex,
handle=username)
page.layout.options.push(user)
return dict(page.termination())
def login_update(request):
log = logging.getLogger("cloudhands.web.login_update")
con = registered_connection(request)
data = RegistrationView(request.POST)
if data.invalid:
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
user = con.session.query(User).filter(
User.handle == data["username"]).first()
if not user:
raise HTTPClientError("User {} not found".format(data["username"]))
# Find the most recent valid registration for this user
reg = con.session.query(Registration).join(Touch).join(User).join(
State).filter(User.handle == data["username"]).filter(
State.name not in ("expired", "withdrawn")).order_by(
desc(Touch.at)).first()
if not reg:
raise HTTPInternalServerError(
"No valid registration found for {}".format(user.handle))
try:
passwords = sorted(
((c.at, r) for c in reg.changes for r in c.resources
if isinstance(r, BcryptedPassword)),
reverse=True)
hash = passwords[0][1].value
except (AttributeError, IndexError):
raise HTTPInternalServerError(
"Registration {} is missing a password".format(reg.uuid))
if bcrypt.checkpw(data["password"], hash):
headers = remember(request, user.handle)
latest = reg.changes[-1]
if latest.state.name == "pre_user_posixaccount":
taken = {i.value for i in con.session.query(PosixUIdNumber).all()}
uidN = next_uidnumber(taken=taken)
if uidN is None:
raise HTTPInternalServerError(
"UIdNumber could not be allocated")
else:
log.info("Allocating user id number {}".format(uidN))
latest = NewAccount(user, uidN, reg)(con.session)
# TODO: check state and report error
if latest.state.name in ("user_posixaccount", "valid"):
# FIXME: Temporary workaround for race condition (bug #380)
try:
uids = sorted(
((c.at, r) for c in reg.changes for r in c.resources
if isinstance(r, PosixUId)),
reverse=True)
uid = uids[0][1].value
status = change_password(uid, data["password"], timeout=3)
except (AttributeError, IndexError):
raise HTTPInternalServerError(
"Registration {} is missing a uid".format(reg.uuid))
else:
if status is None:
raise HTTPInternalServerError(
"Unable to create password-protected account")
try:
config = request.registry.settings["cfg"]
pxUId = con.session.query(PosixUId).join(Touch).join(
Registration).filter(Registration.uuid == reg.uuid).first()
providers = con.session.query(Provider).join(Subscription).join(
Organisation).join(Membership).join(Touch).join(User).filter(
User.id == user.id).all()
for provider in providers:
# TODO: pipes will be one per provider
path = os.path.expanduser(config["pipe.tokens"]["vcloud"])
msg = (reg.uuid, provider.name, pxUId.value, data["password"])
pq = SimplePipeQueue.pipequeue(path)
pq.put_nowait(msg)
pq.close()
except Exception as e:
log.error(e)
raise HTTPFound(
location = request.route_url("top"), headers = headers)
else:
raise LoginForbidden("Login failed. Please try again.")
def logout_update(request):
log = logging.getLogger("cloudhands.web.logout_update")
headers = forget(request)
log.debug(headers)
raise HTTPFound(
location = request.route_url("top"), headers = headers)
def membership_read(request):
log = logging.getLogger("cloudhands.web.membership_read")
con = registered_connection(request)
user = authenticate_user(request) # NB: may be None
m_uuid = request.matchdict["mship_uuid"]
mship = con.session.query(Membership).filter(
Membership.uuid == m_uuid).first()
if mship is None:
raise NotFound("Membership {} not found".format(m_uuid))
if mship.changes and mship.changes[-1].state.name == "invited":
act = Acceptance(mship, user)(con.session)
log.debug(act)
guest_uuid = act.actor.uuid
reg = con.session.query(Registration).join(Touch).join(User).filter(
User.uuid == guest_uuid).first()
if not reg:
raise NotFound("Registration not found for {}".format(guest_uuid))
else:
raise HTTPFound(
location=request.route_url("registration", reg_uuid=reg.uuid))
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
rsrcs = con.session.query(Resource).join(Touch).join(Membership).filter(
Membership.uuid == m_uuid).all()
for r in rsrcs:
page.layout.items.push(r)
page.layout.options.push(mship)
return dict(page.termination())
def membership_update(request):
log = logging.getLogger("cloudhands.web.membership_update")
user = authenticate_user(request)
con = registered_connection(request)
m_uuid = request.matchdict["mship_uuid"]
mship = con.session.query(Membership).filter(
Membership.uuid == m_uuid).first()
if not mship:
raise NotFound()
prvlg = con.session.query(Membership).join(Organisation).join(
Touch).join(User).filter(
User.id == user.id).filter(
Organisation.id == mship.organisation.id).filter(
Membership.role == "admin").first()
    if not prvlg or prvlg.changes[-1].state.name not in ("accepted", "active"):
raise Forbidden("Admin privilege is required to update membership.")
index = request.registry.settings["args"].index
query = dict(request.POST).get("designator", "") # TODO: validate
try:
p = next(people(index, query, field="id"))
except:
raise Forbidden("LDAP record not accessible.")
for typ, vals in zip(
(PosixUId, PosixGId, PublicKey), ([p.uid], p.gids, p.keys)
):
for r in create_membership_resources(con.session, mship, typ, vals):
log.debug(r)
raise HTTPFound(
location=request.route_url("membership", mship_uuid=m_uuid))
def organisation_read(request):
log = logging.getLogger("cloudhands.web.organisation_read")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request, Forbidden))
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
oN = request.matchdict["org_name"]
org = con.session.query(Organisation).filter(
Organisation.name == oN).first()
if not org:
raise NotFound("Organisation not found for {}".format(oN))
reg = con.session.query(Registration).join(Touch).join(User).filter(
User.uuid == user.uuid).first()
page.layout.nav.push(reg)
for o in sorted(
{i.organisation for i in mships},
key=operator.attrgetter("name")
):
page.layout.nav.push(o, isSelf=o is org)
refresh = 300
seconds = {
"pre_provision": 5,
"provisioning": 15,
"pre_check": 2,
"pre_delete": 2,
"pre_start": 2,
"pre_stop": 2,
"pre_operational": 5,
"operational": 60,
}
for t, s, a in sorted((
(a.changes[-1].at, a.changes[-1].state.name, a)
for a in org.appliances),
reverse=True
):
refresh = min(refresh, seconds.get(s, 300))
page.layout.items.push(a)
page.layout.info.push(PageInfo(title=oN, refresh=refresh))
mships = con.session.query(Membership).join(Organisation).join(
Touch).join(State).join(User).filter(
User.id == user.id).filter(
Organisation.id == org.id).all()
for m in mships:
page.layout.options.push(m, session=con.session)
return dict(page.termination())
def organisation_catalogue_read(request):
log = logging.getLogger("cloudhands.web.organisation_catalogue_read")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request, Forbidden))
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
oN = request.matchdict["org_name"]
org = con.session.query(Organisation).filter(
Organisation.name == oN).first()
if not org:
raise NotFound("Organisation not found for {}".format(oN))
else:
page.layout.info.push(PageInfo(title=oN))
reg = con.session.query(Registration).join(Touch).join(User).filter(
User.uuid == user.uuid).first()
page.layout.nav.push(reg)
for o in sorted(
{i.organisation for i in mships}, key=operator.attrgetter("name")
):
page.layout.nav.push(o, isSelf=o is org)
for i in org.catalogue:
page.layout.items.push(i)
return dict(page.termination())
# TODO: Remove
def organisation_hosts_create(request):
log = logging.getLogger("cloudhands.web.organisation_hosts_create")
userId = authenticated_userid(request)
if userId is None:
raise Forbidden()
con = registered_connection(request)
user = con.session.query(User).join(Touch).join(
EmailAddress).filter(EmailAddress.value == userId).first()
if not user:
raise NotFound("User not found for {}".format(userId))
data = HostView(request.POST)
if data.invalid:
log.debug(request.POST)
log.debug(data)
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
oN = request.matchdict["org_name"]
if data["jvo"] != oN:
raise HTTPBadRequest("Mismatched organisation field")
org = con.session.query(Organisation).filter(
Organisation.name == oN).first()
if not org:
raise NotFound("Organisation '{}' not found".format(oN))
now = datetime.datetime.utcnow()
requested = con.session.query(HostState).filter(
HostState.name == "requested").one()
host = Host(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
name=data["name"]
)
act = Touch(artifact=host, actor=user, state=requested, at=now)
host.changes.append(act)
con.session.add(OSImage(name=data["image"], touch=act))
log.info(host)
con.session.add(host)
con.session.commit()
raise HTTPFound(
location=request.route_url("organisation", org_name=oN))
def organisation_appliances_create(request):
log = logging.getLogger("cloudhands.web.organisation_appliances_create")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request, Forbidden))
data = CatalogueItemView(request.POST)
if data.invalid:
log.debug(request.POST)
log.debug(data)
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
oN = request.matchdict["org_name"]
org = con.session.query(Organisation).filter(
Organisation.name == oN).first()
if not org:
raise NotFound("Organisation '{}' not found".format(oN))
now = datetime.datetime.utcnow()
configuring = con.session.query(ApplianceState).filter(
ApplianceState.name == "configuring").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=configuring, at=now)
tmplt = con.session.query(CatalogueItem).filter(
CatalogueItem.uuid == data["uuid"]).first()
choice = CatalogueChoice(
provider=None, touch=act,
**{k: getattr(tmplt, k, None)
for k in ("name", "description", "logo", "natrouted")})
con.session.add(choice)
con.session.commit()
raise HTTPFound(
location=request.route_url("appliance", app_uuid=app.uuid))
def organisation_memberships_create(request):
log = logging.getLogger("cloudhands.web.organisation_memberships_create")
cfg = request.registry.settings.get("cfg", None)
con = registered_connection(request)
data = MembershipView(request.POST)
if data.invalid:
log.debug(request.POST)
log.debug(data)
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
oN = request.matchdict["org_name"]
org = con.session.query(Organisation).filter(
Organisation.name == oN).first()
if not org:
raise NotFound("Organisation '{}' not found".format(oN))
admin = con.session.merge(authenticate_user(request, Forbidden))
invite = Invitation(
admin, org,
data["username"], data["surname"], data["email"]
)(con.session)
if not invite:
raise Forbidden("User {} lacks permission.".format(admin.handle))
else:
log.debug(invite.artifact)
# TODO: calculate this location from membership_read view
locn = request.route_url(
"membership", mship_uuid=invite.artifact.uuid)
raise HTTPFound(location=request.static_url(
"{}/membership-confirm.html".format(
cfg["paths.assets"]["html"])))
def people_read(request):
log = logging.getLogger("cloudhands.web.people")
userId = authenticated_userid(request)
if userId is None:
raise Forbidden()
con = registered_connection(request)
user = con.session.query(User).join(Touch).join(
EmailAddress).filter(EmailAddress.value == userId).first()
page = PeoplePage(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
index = request.registry.settings["args"].index
query = dict(request.GET).get("description", "") # TODO: validate
try:
for p in people(index, query):
page.layout.items.push(p)
except Exception:
log.warning("No access to index {}".format(index))
raise HTTPInternalServerError(
location=request.route_url("people"),
detail="Temporary loss of index. Please try again later.")
return dict(page.termination())
def macauth_creds(request):
userId = authenticated_userid(request)
if userId is None:
raise Forbidden()
# Get a reference to the MACAuthenticationPolicy plugin.
stack = request.registry.getUtility(IAuthenticationPolicy)
policy = stack.policies["apimac"]
try:
id, key = CRED_TABLE[userId]
except KeyError:
id, key = policy.encode_mac_id(request, userId)
CRED_TABLE[userId] = (id, key)
return {"id": id, "key": key}
def registration_passwords(request):
log = logging.getLogger("cloudhands.web.registration_passwords")
con = registered_connection(request)
cfg = request.registry.settings.get("cfg", None)
reg_uuid = request.matchdict["reg_uuid"]
reg = con.session.query(Registration).filter(
Registration.uuid == reg_uuid).first()
if not reg:
raise NotFound("Registration {} not found".format(reg_uuid))
# This page can be visited while unauthenticated but only in the
# first phase of the onboarding process.
sName = reg.changes[-1].state.name
if sName == "pre_registration_person":
user = reg.changes[0].actor
else:
user = con.session.merge(authenticate_user(request, Forbidden))
        if user is not reg.changes[0].actor:
raise Forbidden(
"You are not authorized to modify this registration.")
data = BcryptedPasswordView(request.POST)
if data.invalid:
bad = data.invalid[0].name
if bad == "password":
raise RegistrationForbidden(
"The password you entered does not conform to requirements."
" Please choose again.")
else:
raise HTTPBadRequest("Bad value in '{}' field".format(bad))
act = NewPassword(user, data["password"], reg)(con.session)
raise HTTPFound(location=request.route_url(
"login", _query={"username": user.handle}))
def registration_keys(request):
log = logging.getLogger("cloudhands.web.registration_keys")
reg_uuid = request.matchdict["reg_uuid"]
con = registered_connection(request)
reg = con.session.query(Registration).filter(
Registration.uuid == reg_uuid).first()
if not reg:
raise NotFound("Registration {} not found".format(reg_uuid))
user = con.session.merge(authenticate_user(request, Forbidden))
    if user is not reg.changes[0].actor:
raise Forbidden(
"You are not authorized to modify this registration.")
data = PublicKeyView(request.POST)
if data.invalid:
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
now = datetime.datetime.utcnow()
state = reg.changes[-1].state
act = Touch(artifact=reg, actor=user, state=state, at=now)
key = PublicKey(touch=act, value=data["value"].strip())
con.session.add(key)
con.session.commit()
raise HTTPFound(
location=request.route_url("registration", reg_uuid=reg.uuid))
def registration_read(request):
log = logging.getLogger("cloudhands.web.registration_read")
con = registered_connection(request)
reg_uuid = request.matchdict["reg_uuid"]
reg = con.session.query(Registration).filter(
Registration.uuid == reg_uuid).first()
if not reg:
raise NotFound("Registration {} not found".format(reg_uuid))
page = Page(
session=con.session,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
page.layout.nav.push(reg)
# This page can be visited while unauthenticated but only in the
# first phase of the onboarding process.
sName = reg.changes[-1].state.name
if sName == "pre_registration_person":
# TODO: Check TimeInterval hasn't expired
user = reg.changes[0].actor
else:
user = con.session.merge(authenticate_user(request, Forbidden))
page.layout.info.push(PageInfo(title=user.handle))
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
for o in sorted(
{i.organisation for i in mships},
key=operator.attrgetter("name")
):
page.layout.nav.push(o)
if sName == "pre_user_inetorgperson_dn":
page.layout.options.push(PosixUId())
return dict(page.termination())
display = (
(PosixUIdNumber, False),
(PosixUId, False),
(EmailAddress, False),
(BcryptedPassword, True),
(PosixGId, False),
(PublicKey, True)
)
for class_, isCreatable in display:
rsrcs = con.session.query(class_).join(Touch).join(Registration).filter(
Registration.uuid == reg_uuid).order_by(desc(Touch.at)).all()
if not rsrcs and isCreatable:
blank = class_()
blank.uuid = reg_uuid
page.layout.options.push(blank)
for r in rsrcs:
page.layout.items.push(r)
return dict(page.termination())
# TODO: Remove this view. Everything happens in registration
def user_read(request):
log = logging.getLogger("cloudhands.web.user_read")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request, Forbidden))
u_uuid = request.matchdict["user_uuid"]
actor = con.session.query(User).filter(User.uuid == u_uuid).first()
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
for o in sorted(
{i.organisation for i in mships}, key=operator.attrgetter("name")
):
page.layout.nav.push(o)
regs = con.session.query(Registration).join(Touch).join(User).filter(
User.uuid == u_uuid).all()
resources = [r for reg in regs for c in reg.changes for r in c.resources]
for i in resources:
page.layout.items.push(i)
#if not any(i for i in resources if isinstance(i, Label)):
# label = Label()
# label.uuid = appUuid
# page.layout.items.push(label)
return dict(page.termination())
def wsgi_app(args, cfg):
attribs = {
"macauth.master_secret": cfg["auth.macauth"]["secret"],
"args": args,
"cfg": cfg
}
config = Configurator(settings=attribs)
config.include("pyramid_chameleon")
if (cfg.has_section("auth.persona")
and cfg.getboolean("auth.persona", "enable")):
config.add_settings({
"persona.secret": cfg["auth.persona"]["secret"],
"persona.audiences": [
cfg["auth.persona"]["host"],
"http://{}:{}".format(platform.node(), args.port)],
})
config.include("pyramid_persona")
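    # Custom JSON renderer ("hateoas") with adapters so datetimes, compiled
    # regular expressions, Serializable records and Touch events serialize
    # cleanly.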
hateoas = JSON(indent=4)
hateoas.add_adapter(datetime.datetime, datetime_adapter)
hateoas.add_adapter(type(re.compile("")), regex_adapter)
hateoas.add_adapter(Serializable, record_adapter)
hateoas.add_adapter(Touch, touch_adapter)
config.add_renderer("hateoas", hateoas)
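    # Routes and their GET/POST views. HTML pages render via the templates in
    # cfg["paths.templates"]; JSON renderings use the "hateoas" renderer where
    # enabled.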
config.add_route(
"appliance", "/appliance/{app_uuid}")
config.add_view(
appliance_read,
route_name="appliance", request_method="GET",
renderer=cfg["paths.templates"]["appliance"])
config.add_view(
appliance_read,
route_name="appliance", request_method="GET",
renderer="hateoas", accept="application/json", xhr=True)
config.add_view(
appliance_modify,
route_name="appliance", request_method="POST",
renderer=cfg["paths.templates"]["appliance"])
config.add_route("top", "/")
config.add_view(
top_read, route_name="top", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["home"])
config.add_route("login", "/login")
config.add_view(
login_read,
route_name="login", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["login"])
config.add_view(
login_read, context=LoginForbidden,
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["login"])
config.add_view(
login_update, route_name="login", request_method="POST")
#renderer="hateoas", accept="application/json", xhr=None)
config.add_route("logout", "/logout")
config.add_view(
logout_update, route_name="logout", request_method="GET")
config.add_route("host", "/host/{host_uuid}")
config.add_view(
host_update, route_name="host", request_method="POST",
renderer="hateoas", accept="application/json", xhr=None)
config.add_route("membership", "/membership/{mship_uuid}")
config.add_view(
membership_read, route_name="membership", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["membership"])
config.add_view(
membership_update,
route_name="membership", request_method="POST",
renderer="hateoas", accept="application/json", xhr=None)
config.add_route("organisation", "/organisation/{org_name}")
config.add_view(
organisation_read, route_name="organisation", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["organisation"])
config.add_route(
"organisation_appliances", "/organisation/{org_name}/appliances")
config.add_view(
organisation_appliances_create,
route_name="organisation_appliances", request_method="POST")
config.add_route(
"organisation_memberships", "/organisation/{org_name}/memberships")
config.add_view(
organisation_memberships_create,
route_name="organisation_memberships", request_method="POST",
renderer="hateoas", accept="application/json", xhr=None)
config.add_route(
"organisation_catalogue", "/organisation/{org_name}/catalogue")
config.add_view(
organisation_catalogue_read,
route_name="organisation_catalogue", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["catalogue"])
config.add_route("people", "/people")
config.add_view(
people_read, route_name="people", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["people"])
config.add_view(
login_read, context=RegistrationForbidden,
renderer=cfg["paths.templates"]["registration"])
config.add_route("account", "/account/{reg_uuid}")
config.add_route("registration", "/registration/{reg_uuid}")
config.add_view(
registration_read, route_name="account", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["registration"])
config.add_view(
registration_read, route_name="registration", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["registration"])
config.add_route("registration_keys", "/registration/{reg_uuid}/keys")
config.add_view(
registration_keys,
route_name="registration_keys", request_method="POST")
#renderer="hateoas", accept="application/json", xhr=None)
config.add_route(
"registration_passwords",
"/registration/{reg_uuid}/passwords"
)
config.add_view(
registration_passwords, route_name="registration_passwords",
request_method="POST")
config.add_route("creds", "/creds")
config.add_view(
macauth_creds, route_name="creds", request_method="GET",
renderer="json", accept="application/json")
#renderer="cloudhands.web:templates/creds.pt")
config.add_route("user", "/user/{user_uuid}")
config.add_view(
user_read, route_name="user", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["user"])
config.add_static_view(name="css", path=cfg["paths.assets"]["css"])
config.add_static_view(name="html", path=cfg["paths.assets"]["html"])
config.add_static_view(name="js", path=cfg["paths.assets"]["js"])
config.add_static_view(name="img", path=cfg["paths.assets"]["img"])
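    # Authentication stack: cookie-based auth_tkt for browser sessions and
    # MAC-signed requests ("apimac") for API clients, with ACL authorization.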
authn_policy = AuthenticationStackPolicy()
authn_policy.add_policy(
"auth_tkt",
AuthTktAuthenticationPolicy(
cfg["auth.macauth"]["secret"],
callback=None)
)
authn_policy.add_policy(
"apimac",
MACAuthenticationPolicy(
attribs["macauth.master_secret"],
))
authz_policy = ACLAuthorizationPolicy()
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(authz_policy)
config.scan()
app = config.make_wsgi_app()
return app
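# Set up logging, take the first configuration profile from `settings`,
# connect the registry to the SQLite database given on the command line and
# initialise the session.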
def configure(args):
logging.basicConfig(
level=args.log_level,
format="%(asctime)s %(levelname)-7s %(name)s|%(message)s")
cfgN, cfg = next(iter(settings.items()))
r = Registry()
session = r.connect(sqlite3, args.db).session
initialise(session)
return cfg, session
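# Entry point for serving: configure, build the WSGI app and serve it over
# plain HTTP on this host's node name and the requested port.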
def main(args):
cfg, session = configure(args)
app = wsgi_app(args, cfg)
serve(app, host=platform.node(), port=args.port, url_scheme="http")
return 1
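# Command-line interface: version flag, verbosity, port, database path, index
# directory and an optional log file path.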
def parser(description=__doc__):
    rv = argparse.ArgumentParser(description=description)
rv.add_argument(
"--version", action="store_true", default=False,
help="Print the current version number")
rv.add_argument(
"-v", "--verbose", required=False,
action="store_const", dest="log_level",
const=logging.DEBUG, default=logging.INFO,
help="Increase the verbosity of output")
rv.add_argument(
"--port", type=int, default=DFLT_PORT,
help="Set the port number [{}]".format(DFLT_PORT))
rv.add_argument(
"--db", default=DFLT_DB,
help="Set the path to the database [{}]".format(DFLT_DB))
rv.add_argument(
"--index", default=DFLT_IX,
help="Set the path to the index directory [{}]".format(DFLT_IX))
rv.add_argument(
"--log", default=None, dest="log_path",
help="Set a file path for log output")
return rv
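# Console script entry point: print the version and exit, or run the service.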
def run():
p = parser()
args = p.parse_args()
if args.version:
sys.stdout.write(__version__ + "\n")
rv = 0
else:
rv = main(args)
sys.exit(rv)
if __name__ == "__main__":
run()
| bsd-3-clause |