from .model import SpecDir, create_spec_dict
from .app import create_app
from . import tasks, monkey_patch
__all__ = (
"SpecDir",
"create_spec_dict",
"create_app",
"tasks",
"monkey_patch",
)
__author__ = """Ian Maurer"""
__email__ = "[email protected]"
__version__ = "0.8.2"
__uri__ = "http://www.github.com/genomoncology/specd"
__copyright__ = "Copyright (c) 2018 genomoncology.com"
__description__ = "specd: Swagger Specification Directories"
__doc__ = __description__ + " <" + __uri__ + ">"
__license__ = "MIT"
__title__ = "specd"
|
"""
Scrapy core exceptions
These exceptions are documented in docs/topics/exceptions.rst. Please don't add
new exceptions here without documenting them there.
"""
# Internal
class NotConfigured(Exception):
"""Indicates a missing configuration situation"""
pass
class _InvalidOutput(TypeError):
"""
Indicates an invalid value has been returned by a middleware's processing method.
Internal and undocumented, it should not be raised or caught by user code.
"""
pass
# HTTP and crawling
class IgnoreRequest(Exception):
"""Indicates a decision was made not to process a request"""
class DontCloseSpider(Exception):
"""Request the spider not to be closed yet"""
pass
class CloseSpider(Exception):
"""Raise this from callbacks to request the spider to be closed"""
def __init__(self, reason='cancelled'):
super(CloseSpider, self).__init__()
self.reason = reason
# Items
class DropItem(Exception):
"""Drop item from the item pipeline"""
pass
class NotSupported(Exception):
"""Indicates a feature or method is not supported"""
pass
# Commands
class UsageError(Exception):
"""To indicate a command-line usage error"""
def __init__(self, *a, **kw):
self.print_help = kw.pop('print_help', True)
super(UsageError, self).__init__(*a, **kw)
class ScrapyDeprecationWarning(Warning):
"""Warning category for deprecated features, since the default
DeprecationWarning is silenced on Python 2.7+
"""
pass
class ContractFail(AssertionError):
"""Error raised in case of a failing contract"""
pass
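# Usage sketch (not part of Scrapy itself; illustrative only): a spider callback
# would typically raise CloseSpider to stop a crawl early, e.g.
#
#   def parse(self, response):
#       if response.status == 403:
#           raise CloseSpider(reason='banned')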
|
from setuptools import setup
setup(
name='connect-migration',
version='1.0',
py_modules=['connect_migration'],
url='https://github.com/ingrammicro/connect-python-sdk-migration-framework',
license='Apache Software License',
author='Ingram Micro',
description='Small middleware to ease the service migration from legacy to Connect.'
)
|
# Copyright 2021 Christian Schneider Pedersen <[email protected]>, Helene Bach Vistisen, Julian Teule, Mikkel Filt Bengtson, Victor Büttner <[email protected]>
#
# SPDX-License-Identifier: Beerware OR MIT
import ad_path
from antenna_diversity.channel import channel_models
from antenna_diversity.diversity_technique import egc
from antenna_diversity.encoding import SymbolEncoder
from antenna_diversity import modulation
import numpy as np
from antenna_diversity.protocols import dect
ad_path.nop()
# Create DECT packet
payload = b'0123456789012345678901234567890123456789'
dect_packet = dect.Full(payload)
# Modulate DECT packet
my_pam = modulation.PSK(2)
my_symbols = SymbolEncoder(2).encode_msb(dect_packet.to_bytes())
modulated_symbols = my_pam.modulate(my_symbols)
N = 3
# Create the channel with N antennas and an SNR of 10
chnl = channel_models.RayleighAWGNChannel(N, 10)
r, h = chnl.run(modulated_symbols)
# Combine the received branches with equal-gain combining (EGC) and demodulate
recv = egc(r)
my_demodulate = my_pam.demodulate(recv)
print(my_demodulate)
if np.array_equal(my_demodulate, my_symbols):
    print("Symbols recovered correctly")
else:
    print("Symbol errors detected")
|
import requests
import re
from urllib.parse import urljoin
target_url = input("Enter the full target URI: \n")
def extract_links(url):
response = requests.get(url)
    return re.findall('(?:href=")(.*?)"', response.text)
href_link = extract_links(target_url)
for link in href_link:
link = urljoin(target_url,link)
print(link)
|
# -*- coding: utf-8 -*-
#
# Podcastparser: A simple, fast and efficient podcast parser
# Copyright (c) 2012, 2013, 2014, 2018, 2020 Thomas Perl <[email protected]>
# Copyright (c) 2016, 2017, 2018, 2019, 2020 Eric Le Lay <[email protected]>
# Copyright (c) 2020 E.S. Rosenberg <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
""" Simplified, fast RSS parser """
# Will be parsed by setup.py to determine package metadata
__author__ = 'Thomas Perl <[email protected]>'
__version__ = '0.6.5'
__website__ = 'http://gpodder.org/podcastparser/'
__license__ = 'ISC License'
from xml import sax
import re
import os
import time
try:
# Python 2
from htmlentitydefs import entitydefs
entitydefs = dict((key, value.decode('latin-1')) for key, value in entitydefs.iteritems())
chr = unichr
except ImportError:
# Python 3
from html.entities import entitydefs
try:
# Python 2
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
try:
# Python 2
from rfc822 import parsedate_tz
import calendar
# This is taken from Python 3's email._parseaddr, since it handles
# pre-epoch dates better than what Python 2 does (time.mktime())
def mktime_tz(data):
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = calendar.timegm(data)
return t - data[9]
except ImportError:
# Python 3
from email.utils import mktime_tz, parsedate_tz
import logging
logger = logging.getLogger(__name__)
class Target(object):
WANT_TEXT = False
def __init__(self, key=None, filter_func=lambda x: x.strip(), overwrite=True):
self.key = key
self.filter_func = filter_func
self.overwrite = overwrite
def start(self, handler, attrs):
pass
def end(self, handler, text):
pass
class RSS(Target):
def start(self, handler, attrs):
if 'xml:base' in attrs.keys():
handler.set_base(attrs.get('xml:base'))
class PodcastItem(Target):
def end(self, handler, text):
by_published = lambda entry: entry.get('published')
order = 'type' not in handler.data or handler.data['type'] != 'serial'
handler.data['episodes'].sort(key=by_published, reverse=order)
if handler.max_episodes:
episodes = handler.data['episodes'][:handler.max_episodes]
handler.data['episodes'] = episodes
class PodcastAttr(Target):
WANT_TEXT = True
def end(self, handler, text):
handler.set_podcast_attr(self.key, self.filter_func(text))
class PodcastAttrType(Target):
WANT_TEXT = True
def end(self, handler, text):
value = self.filter_func(text)
if value in ('episodic', 'serial'):
handler.set_podcast_attr(self.key, value)
class PodcastAttrRelativeLink(PodcastAttr):
def end(self, handler, text):
text = urlparse.urljoin(handler.base, text)
super(PodcastAttrRelativeLink, self).end(handler, text)
class PodcastAttrFromHref(Target):
def start(self, handler, attrs):
value = attrs.get('href')
if value:
value = urlparse.urljoin(handler.base, value)
handler.set_podcast_attr(self.key, self.filter_func(value))
class EpisodeItem(Target):
def start(self, handler, attrs):
handler.add_episode()
def end(self, handler, text):
handler.validate_episode()
class EpisodeAttr(Target):
WANT_TEXT = True
def end(self, handler, text):
if not self.overwrite and handler.get_episode_attr(self.key):
return
handler.set_episode_attr(self.key, self.filter_func(text))
class EpisodeAttrRelativeLink(EpisodeAttr):
def end(self, handler, text):
text = urlparse.urljoin(handler.base, text)
super(EpisodeAttrRelativeLink, self).end(handler, text)
class EpisodeGuid(EpisodeAttr):
def start(self, handler, attrs):
if attrs.get('isPermaLink', 'true').lower() == 'true':
handler.set_episode_attr('_guid_is_permalink', True)
else:
handler.set_episode_attr('_guid_is_permalink', False)
def end(self, handler, text):
def filter_func(guid):
guid = guid.strip()
if handler.get_episode_attr('_guid_is_permalink'):
return urlparse.urljoin(handler.base, guid)
return guid
self.filter_func = filter_func
EpisodeAttr.end(self, handler, text)
class EpisodeAttrFromHref(Target):
def start(self, handler, attrs):
value = attrs.get('href')
if value:
value = urlparse.urljoin(handler.base, value)
handler.set_episode_attr(self.key, self.filter_func(value))
class Enclosure(Target):
def __init__(self, file_size_attribute):
Target.__init__(self)
self.file_size_attribute = file_size_attribute
def start(self, handler, attrs):
url = attrs.get('url')
if url is None:
return
url = parse_url(urlparse.urljoin(handler.base, url.lstrip()))
file_size = parse_length(attrs.get(self.file_size_attribute))
mime_type = parse_type(attrs.get('type'))
handler.add_enclosure(url, file_size, mime_type)
class AtomLink(Target):
def start(self, handler, attrs):
rel = attrs.get('rel', 'alternate')
url = parse_url(urlparse.urljoin(handler.base, attrs.get('href')))
mime_type = parse_type(attrs.get('type', 'text/html'))
file_size = parse_length(attrs.get('length', '0'))
if rel == 'enclosure':
handler.add_enclosure(url, file_size, mime_type)
elif rel == 'payment':
handler.set_episode_attr('payment_url', url)
elif mime_type == 'text/html':
if rel in ('self', 'alternate'):
if not handler.get_episode_attr('link'):
handler.set_episode_attr('link', url)
class PodcastAtomLink(AtomLink):
def start(self, handler, attrs):
rel = attrs.get('rel', 'alternate')
url = parse_url(urlparse.urljoin(handler.base, attrs.get('href')))
mime_type = parse_type(attrs.get('type'))
# RFC 5005 (http://podlove.org/paged-feeds/)
if rel == 'first':
handler.set_podcast_attr('paged_feed_first', url)
elif rel == 'next':
handler.set_podcast_attr('paged_feed_next', url)
elif rel == 'payment':
handler.set_podcast_attr('payment_url', url)
elif mime_type == 'text/html':
if rel in ('self', 'alternate'):
handler.set_podcast_attr('link', url)
class AtomContent(Target):
WANT_TEXT = True
def __init__(self):
self._want_content = False
def start(self, handler, attrs):
self._mime_type = attrs.get('type', 'text')
def end(self, handler, text):
if self._mime_type == 'html':
handler.set_episode_attr('description_html', text)
elif self._mime_type == 'text':
handler.set_episode_attr('description', squash_whitespace(text))
class RSSItemDescription(Target):
"""
    RSS 2.0 almost encourages putting HTML content in item/description,
    but content:encoded is the better source of HTML content and itunes:summary
is known to contain the short textual description of the item.
So use a heuristic to attribute text to either description or description_html,
without overriding existing values.
"""
WANT_TEXT = True
def __init__(self):
self._want_content = False
def end(self, handler, text):
if is_html(text):
if not handler.get_episode_attr('description_html'):
handler.set_episode_attr('description_html', text.strip())
elif not handler.get_episode_attr('description'):
# don't overwrite itunes:summary?
handler.set_episode_attr('description', squash_whitespace(text))
class PodloveChapters(Target):
SUPPORTED_VERSIONS = ('1.1', '1.2')
def start(self, handler, attrs):
version = attrs.get('version', '1.1')
if version not in PodloveChapters.SUPPORTED_VERSIONS:
logger.warning('Possible incompatible chapters version: %s', version)
class PodloveChapter(Target):
def start(self, handler, attrs):
# Both the start and title attributes are mandatory
if attrs.get('start') is None or attrs.get('title') is None:
            logger.warning('Invalid chapter (missing start and/or title)')
return
chapter = {
'start': parse_time(attrs.get('start')),
'title': attrs.get('title'),
}
for optional in ('href', 'image'):
value = attrs.get(optional)
if value:
chapter[optional] = value
handler.get_episode_attr('chapters').append(chapter)
class Namespace():
# Mapping of XML namespaces to prefixes as used in MAPPING below
NAMESPACES = {
# iTunes Podcasting, http://www.apple.com/itunes/podcasts/specs.html
'http://www.itunes.com/dtds/podcast-1.0.dtd': 'itunes',
'http://www.itunes.com/DTDs/Podcast-1.0.dtd': 'itunes',
# Atom Syndication Format, http://tools.ietf.org/html/rfc4287
'http://www.w3.org/2005/Atom': 'atom',
'http://www.w3.org/2005/Atom/': 'atom',
# Media RSS, http://www.rssboard.org/media-rss
'http://search.yahoo.com/mrss/': 'media',
# From http://www.rssboard.org/media-rss#namespace-declaration:
# "Note: There is a trailing slash in the namespace, although
# there has been confusion around this in earlier versions."
'http://search.yahoo.com/mrss': 'media',
# Podlove Simple Chapters, http://podlove.org/simple-chapters
'http://podlove.org/simple-chapters': 'psc',
'http://podlove.org/simple-chapters/': 'psc',
# Purl RSS Content module
'http://purl.org/rss/1.0/modules/content/': 'content',
}
def __init__(self, attrs, parent=None):
self.namespaces = self.parse_namespaces(attrs)
self.parent = parent
@staticmethod
def parse_namespaces(attrs):
"""Parse namespace definitions from XML attributes
>>> expected = {'': 'example'}
>>> Namespace.parse_namespaces({'xmlns': 'example'}) == expected
True
>>> expected = {'foo': 'http://example.com/bar'}
>>> Namespace.parse_namespaces({'xmlns:foo':
... 'http://example.com/bar'}) == expected
True
>>> expected = {'': 'foo', 'a': 'bar', 'b': 'bla'}
>>> Namespace.parse_namespaces({'xmlns': 'foo',
... 'xmlns:a': 'bar', 'xmlns:b': 'bla'}) == expected
True
"""
result = {}
for key in attrs.keys():
if key == 'xmlns':
result[''] = attrs[key]
elif key.startswith('xmlns:'):
result[key[6:]] = attrs[key]
return result
def lookup(self, prefix):
"""Look up a namespace URI based on the prefix"""
current = self
while current is not None:
result = current.namespaces.get(prefix, None)
if result is not None:
return result
current = current.parent
return None
def map(self, name):
"""Apply namespace prefixes for a given tag
>>> namespace = Namespace({'xmlns:it':
... 'http://www.itunes.com/dtds/podcast-1.0.dtd'}, None)
>>> namespace.map('it:duration')
'itunes:duration'
>>> parent = Namespace({'xmlns:m': 'http://search.yahoo.com/mrss/',
... 'xmlns:x': 'http://example.com/'}, None)
>>> child = Namespace({}, parent)
>>> child.map('m:content')
'media:content'
>>> child.map('x:y') # Unknown namespace URI
'!x:y'
>>> child.map('atom:link') # Undefined prefix
'atom:link'
"""
if ':' not in name:
# <duration xmlns="http://..."/>
namespace = ''
namespace_uri = self.lookup(namespace)
else:
# <itunes:duration/>
namespace, name = name.split(':', 1)
namespace_uri = self.lookup(namespace)
if namespace_uri is None:
# Use of "itunes:duration" without xmlns:itunes="..."
logger.warning('No namespace defined for "%s:%s"', namespace,
name)
return '%s:%s' % (namespace, name)
if namespace_uri is not None:
prefix = self.NAMESPACES.get(namespace_uri)
if prefix is None and namespace:
# Proper use of namespaces, but unknown namespace
# logger.warning('Unknown namespace: %s', namespace_uri)
# We prefix the tag name here to make sure that it does not
# match any other tag below if we can't recognize the namespace
name = '!%s:%s' % (namespace, name)
else:
name = '%s:%s' % (prefix, name)
return name
def file_basename_no_extension(filename):
""" Returns filename without extension
>>> file_basename_no_extension('/home/me/file.txt')
'file'
>>> file_basename_no_extension('file')
'file'
"""
base = os.path.basename(filename)
name, extension = os.path.splitext(base)
return name
def squash_whitespace(text):
""" Combine multiple whitespaces into one, trim trailing/leading spaces
>>> squash_whitespace(' some\t text with a lot of spaces ')
'some text with a lot of spaces'
"""
    return re.sub(r'\s+', ' ', text.strip())
def parse_time(value):
"""Parse a time string into seconds
See RFC2326, 3.6 "Normal Play Time" (HH:MM:SS.FRACT)
>>> parse_time('0')
0
>>> parse_time('128')
128
>>> parse_time('00:00')
0
>>> parse_time('00:00:00')
0
>>> parse_time('00:20')
20
>>> parse_time('00:00:20')
20
>>> parse_time('01:00:00')
3600
>>> parse_time(' 03:02:01')
10921
>>> parse_time('61:08')
3668
>>> parse_time('25:03:30 ')
90210
>>> parse_time('25:3:30')
90210
>>> parse_time('61.08')
61
>>> parse_time('01:02:03.500')
3723
>>> parse_time(' ')
0
"""
value = value.strip()
if value == '':
return 0
hours = minutes = seconds = fraction = 0
parsed = False
m = re.match(r'(\d+)[:](\d\d?)[:](\d\d?)([.]\d+)?$', value)
if not parsed and m:
hours, minutes, seconds, fraction = m.groups()
fraction = float(fraction or 0.0)
parsed = True
m = re.match(r'(\d+)[:](\d\d?)([.]\d+)?$', value)
if not parsed and m:
minutes, seconds, fraction = m.groups()
fraction = float(fraction or 0.0)
parsed = True
m = re.match(r'(\d+)([.]\d+)?$', value)
if not parsed and m:
seconds, fraction = m.groups()
fraction = float(fraction or 0.0)
parsed = True
if not parsed:
try:
seconds = int(value)
except ValueError:
logger.warning('Could not parse time value: "%s"', value)
return 0
return (int(hours) * 60 + int(minutes)) * 60 + int(seconds)
def parse_url(text):
return normalize_feed_url(text.strip())
def parse_length(text):
""" Parses a file length
>>> parse_length(None)
-1
>>> parse_length('0')
-1
>>> parse_length('unknown')
-1
>>> parse_length('100')
100
"""
if text is None:
return -1
try:
return int(text.strip()) or -1
except ValueError:
return -1
def parse_type(text):
""" "normalize" a mime type
>>> parse_type('text/plain')
'text/plain'
>>> parse_type('text')
'application/octet-stream'
>>> parse_type('')
'application/octet-stream'
>>> parse_type(None)
'application/octet-stream'
"""
if not text or '/' not in text:
# Maemo bug 10036
return 'application/octet-stream'
return text
def parse_pubdate(text):
"""Parse a date string into a Unix timestamp
>>> parse_pubdate('Fri, 21 Nov 1997 09:55:06 -0600')
880127706
>>> parse_pubdate('2003-12-13T00:00:00+02:00')
1071266400
>>> parse_pubdate('2003-12-13T18:30:02Z')
1071340202
>>> parse_pubdate('Mon, 02 May 1960 09:05:01 +0100')
-305049299
>>> parse_pubdate('')
0
>>> parse_pubdate('unknown')
0
"""
if not text:
return 0
parsed = parsedate_tz(text)
if parsed is not None:
try:
pubtimeseconds = int(mktime_tz(parsed))
return pubtimeseconds
        except (OverflowError, ValueError):
            logger.warning('bad pubdate %s is before epoch or after end of time (2038)', parsed)
return 0
try:
parsed = time.strptime(text[:19], '%Y-%m-%dT%H:%M:%S')
if parsed is not None:
m = re.match(r'^(?:Z|([+-])([0-9]{2})[:]([0-9]{2}))$', text[19:])
if m:
parsed = list(iter(parsed))
if m.group(1):
offset = 3600 * int(m.group(2)) + 60 * int(m.group(3))
if m.group(1) == '-':
offset = 0 - offset
else:
offset = 0
parsed.append(offset)
return int(mktime_tz(tuple(parsed)))
else:
return int(time.mktime(parsed))
except Exception:
pass
logger.error('Cannot parse date: %s', repr(text))
return 0
# If you modify the mapping, don't forget to also update the documentation
# section "Supported XML Elements and Attributes" in doc/index.rst
MAPPING = {
'rss': RSS(),
'rss/channel': PodcastItem(),
'rss/channel/title': PodcastAttr('title', squash_whitespace),
'rss/channel/link': PodcastAttrRelativeLink('link'),
'rss/channel/description': PodcastAttr('description', squash_whitespace),
'rss/channel/image/url': PodcastAttrRelativeLink('cover_url'),
'rss/channel/itunes:image': PodcastAttrFromHref('cover_url'),
'rss/channel/itunes:type': PodcastAttrType('type', squash_whitespace),
'rss/channel/atom:link': PodcastAtomLink(),
'rss/channel/item': EpisodeItem(),
'rss/channel/item/guid': EpisodeGuid('guid'),
'rss/channel/item/title': EpisodeAttr('title', squash_whitespace),
'rss/channel/item/link': EpisodeAttrRelativeLink('link'),
'rss/channel/item/description': RSSItemDescription(),
'rss/channel/item/itunes:summary': EpisodeAttr('description', squash_whitespace),
'rss/channel/item/media:description': EpisodeAttr('description', squash_whitespace),
'rss/channel/item/itunes:subtitle': EpisodeAttr('subtitle', squash_whitespace),
'rss/channel/item/content:encoded': EpisodeAttr('description_html'),
'rss/channel/item/itunes:duration': EpisodeAttr('total_time', parse_time),
'rss/channel/item/pubDate': EpisodeAttr('published', parse_pubdate),
'rss/channel/item/atom:link': AtomLink(),
'rss/channel/item/itunes:image': EpisodeAttrFromHref('episode_art_url'),
'rss/channel/item/media:content': Enclosure('fileSize'),
'rss/channel/item/enclosure': Enclosure('length'),
'rss/channel/item/psc:chapters': PodloveChapters(),
'rss/channel/item/psc:chapters/psc:chapter': PodloveChapter(),
# Basic support for Atom feeds
'atom:feed': PodcastItem(),
'atom:feed/atom:title': PodcastAttr('title', squash_whitespace),
'atom:feed/atom:subtitle': PodcastAttr('description', squash_whitespace),
'atom:feed/atom:icon': PodcastAttrRelativeLink('cover_url'),
'atom:feed/atom:link': PodcastAtomLink(),
'atom:feed/atom:entry': EpisodeItem(),
'atom:feed/atom:entry/atom:id': EpisodeAttr('guid'),
'atom:feed/atom:entry/atom:title': EpisodeAttr('title', squash_whitespace),
'atom:feed/atom:entry/atom:link': AtomLink(),
'atom:feed/atom:entry/atom:content': AtomContent(),
'atom:feed/atom:entry/content:encoded': EpisodeAttr('description_html'),
'atom:feed/atom:entry/atom:published': EpisodeAttr('published', parse_pubdate),
'atom:feed/atom:entry/atom:updated': EpisodeAttr('published', parse_pubdate, overwrite=False),
'atom:feed/atom:entry/media:group/media:description': EpisodeAttr('description', squash_whitespace),
'atom:feed/atom:entry/psc:chapters': PodloveChapters(),
'atom:feed/atom:entry/psc:chapters/psc:chapter': PodloveChapter(),
}
# Derive valid root elements from the supported MAPPINGs
VALID_ROOTS = set(path.split('/')[0] for path in MAPPING.keys())
class FeedParseError(sax.SAXParseException, ValueError):
"""
Exception raised when asked to parse an invalid feed
This exception allows users of this library to catch exceptions
without having to import the XML parsing library themselves.
"""
pass
class PodcastHandler(sax.handler.ContentHandler):
def __init__(self, url, max_episodes):
self.url = url
self.max_episodes = max_episodes
self.base = url
self.text = None
self.episodes = []
self.data = {
'title': file_basename_no_extension(url),
'episodes': self.episodes
}
self.path_stack = []
self.namespace = None
def set_base(self, base):
self.base = base
def set_podcast_attr(self, key, value):
self.data[key] = value
def set_episode_attr(self, key, value):
self.episodes[-1][key] = value
def get_episode_attr(self, key, default=None):
return self.episodes[-1].get(key, default)
def add_episode(self):
self.episodes.append({
# title
'description': '',
# url
'published': 0,
# guid
'link': '',
'total_time': 0,
'payment_url': None,
'enclosures': [],
'_guid_is_permalink': False,
'chapters': [],
})
def validate_episode(self):
entry = self.episodes[-1]
if len(entry['chapters']) == 0:
del entry['chapters']
# Ensures `description` does not contain HTML
if is_html(entry['description']):
if 'description_html' not in entry:
entry['description_html'] = entry['description']
entry['description'] = ''
# Sets `description` to stripped `description_html` when empty
if 'description_html' in entry and not entry['description']:
entry['description'] = remove_html_tags(entry['description_html'])
if 'guid' not in entry:
if entry.get('link'):
# Link element can serve as GUID
entry['guid'] = entry['link']
else:
if len(set(enclosure['url'] for enclosure in entry['enclosures'])) != 1:
# Multi-enclosure feeds MUST have a GUID or the same URL for all enclosures
self.episodes.pop()
return
# Maemo bug 12073
entry['guid'] = entry['enclosures'][0]['url']
if 'title' not in entry:
if len(entry['enclosures']) != 1:
self.episodes.pop()
return
entry['title'] = file_basename_no_extension(
entry['enclosures'][0]['url'])
if not entry.get('link') and entry.get('_guid_is_permalink'):
entry['link'] = entry['guid']
del entry['_guid_is_permalink']
def add_enclosure(self, url, file_size, mime_type):
self.episodes[-1]['enclosures'].append({
'url': url,
'file_size': file_size,
'mime_type': mime_type,
})
def startElement(self, name, attrs):
self.namespace = Namespace(attrs, self.namespace)
name = self.namespace.map(name)
if not self.path_stack and name not in VALID_ROOTS:
raise FeedParseError(
msg='Unsupported feed type: {}'.format(name),
exception=None,
locator=self._locator,
)
self.path_stack.append(name)
target = MAPPING.get('/'.join(self.path_stack))
if target is not None:
target.start(self, attrs)
if target.WANT_TEXT:
self.text = []
def characters(self, chars):
if self.text is not None:
self.text.append(chars)
def endElement(self, name):
target = MAPPING.get('/'.join(self.path_stack))
if target is not None:
content = ''.join(self.text) if self.text is not None else ''
target.end(self, content)
self.text = None
if self.namespace is not None:
self.namespace = self.namespace.parent
self.path_stack.pop()
def parse(url, stream, max_episodes=0):
"""Parse a podcast feed from the given URL and stream
:param url: the URL of the feed. Will be used to resolve relative links
:param stream: file-like object containing the feed content
:param max_episodes: maximum number of episodes to return. 0 (default)
means no limit
:returns: a dict with the parsed contents of the feed
"""
handler = PodcastHandler(url, max_episodes)
try:
sax.parse(stream, handler)
except sax.SAXParseException as e:
raise FeedParseError(e.getMessage(), e.getException(), e._locator)
return handler.data
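# Usage sketch (the feed URL below is only a placeholder, not a real feed):
#
#   from urllib.request import urlopen
#   url = 'http://example.com/feed.xml'
#   feed = parse(url, urlopen(url), max_episodes=5)
#   print(feed['title'], len(feed['episodes']))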
def normalize_feed_url(url):
"""
Normalize and convert a URL. If the URL cannot be converted
(invalid or unknown scheme), None is returned.
This will also normalize feed:// and itpc:// to http://.
>>> normalize_feed_url('itpc://example.org/podcast.rss')
'http://example.org/podcast.rss'
If no URL scheme is defined (e.g. "curry.com"), we will
simply assume the user intends to add a http:// feed.
>>> normalize_feed_url('curry.com')
'http://curry.com/'
It will also take care of converting the domain name to
all-lowercase (because domains are not case sensitive):
>>> normalize_feed_url('http://Example.COM/')
'http://example.com/'
Some other minimalistic changes are also taken care of,
e.g. a ? with an empty query is removed:
>>> normalize_feed_url('http://example.org/test?')
'http://example.org/test'
Leading and trailing whitespace is removed
>>> normalize_feed_url(' http://example.com/podcast.rss ')
'http://example.com/podcast.rss'
Incomplete (too short) URLs are not accepted
>>> normalize_feed_url('http://') is None
True
Unknown protocols are not accepted
>>> normalize_feed_url('gopher://gopher.hprc.utoronto.ca/file.txt') is None
True
"""
url = url.strip()
if not url or len(url) < 8:
return None
# Assume HTTP for URLs without scheme
if '://' not in url:
url = 'http://' + url
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
# Schemes and domain names are case insensitive
scheme, netloc = scheme.lower(), netloc.lower()
# Normalize empty paths to "/"
if path == '':
path = '/'
# feed://, itpc:// and itms:// are really http://
if scheme in ('feed', 'itpc', 'itms'):
scheme = 'http'
if scheme not in ('http', 'https', 'ftp', 'file'):
return None
# urlunsplit might return "a slighty different, but equivalent URL"
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
HTML_TEST = re.compile(r'<[a-z][a-z0-9]*(?:\s.*?>|/?>)', re.IGNORECASE | re.DOTALL)
def is_html(text):
"""Heuristically tell if text is HTML
By looking for an open tag (more or less:)
>>> is_html('<h1>HELLO</h1>')
True
>>> is_html('a < b < c')
False
"""
return bool(HTML_TEST.search(text))
def remove_html_tags(html):
"""
Remove HTML tags from a string and replace numeric and
named entities with the corresponding character, so the
HTML text can be displayed in a simple text view.
"""
if html is None:
return None
# If we would want more speed, we could make these global
re_strip_tags = re.compile('<[^>]*>')
    re_unicode_entities = re.compile(r'&#(\d{2,4});')
re_html_entities = re.compile('&(.{2,8});')
re_newline_tags = re.compile('(<br[^>]*>|<[/]?ul[^>]*>|</li>)', re.I)
re_listing_tags = re.compile('<li[^>]*>', re.I)
result = html
# Convert common HTML elements to their text equivalent
result = re_newline_tags.sub('\n', result)
result = re_listing_tags.sub('\n * ', result)
result = re.sub('<[Pp]>', '\n\n', result)
# Remove all HTML/XML tags from the string
result = re_strip_tags.sub('', result)
# Convert numeric XML entities to their unicode character
result = re_unicode_entities.sub(lambda x: chr(int(x.group(1))), result)
# Convert named HTML entities to their unicode character
result = re_html_entities.sub(lambda x: entitydefs.get(x.group(1), ''), result)
# Convert more than two newlines to two newlines
result = re.sub('([\r\n]{2})([\r\n])+', '\\1', result)
return result.strip()
|
#!/usr/bin/python
from setuptools import setup
# SaltPY v0.1 setup.py
setup(
name = "SaltPY",
version = "0.1",
author = "Riley",
author_email = "[email protected]",
url = "https://github.com/sadminriley/",
license = "MIT",
install_requires=['paramiko'],
dependency_links=[
"git+https://github.com/sadminriley/saltpy.git#egg=saltpy-0.1"
]
)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .work_item_tracking_resource_reference import WorkItemTrackingResourceReference
class WorkItemUpdate(WorkItemTrackingResourceReference):
"""WorkItemUpdate.
:param url:
:type url: str
:param fields:
:type fields: dict
:param id:
:type id: int
:param relations:
:type relations: :class:`WorkItemRelationUpdates <work-item-tracking.v4_0.models.WorkItemRelationUpdates>`
:param rev:
:type rev: int
:param revised_by:
:type revised_by: :class:`IdentityReference <work-item-tracking.v4_0.models.IdentityReference>`
:param revised_date:
:type revised_date: datetime
:param work_item_id:
:type work_item_id: int
"""
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
'fields': {'key': 'fields', 'type': '{WorkItemFieldUpdate}'},
'id': {'key': 'id', 'type': 'int'},
'relations': {'key': 'relations', 'type': 'WorkItemRelationUpdates'},
'rev': {'key': 'rev', 'type': 'int'},
'revised_by': {'key': 'revisedBy', 'type': 'IdentityReference'},
'revised_date': {'key': 'revisedDate', 'type': 'iso-8601'},
'work_item_id': {'key': 'workItemId', 'type': 'int'}
}
def __init__(self, url=None, fields=None, id=None, relations=None, rev=None, revised_by=None, revised_date=None, work_item_id=None):
super(WorkItemUpdate, self).__init__(url=url)
self.fields = fields
self.id = id
self.relations = relations
self.rev = rev
self.revised_by = revised_by
self.revised_date = revised_date
self.work_item_id = work_item_id
|
import re
def p1(inp):
bots = {}
plan = {}
for line in inp:
if line.startswith('value'):
            val, bot = re.findall(r'bot \d+|\d+', line)
bots.setdefault(bot, []).append(int(val))
elif line.startswith('bot'):
            source, low, high = re.findall(r'\w+ \d+', line)
plan[source] = (low, high)
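    # Repeatedly resolve any holder of two chips, passing its low and high chip
    # to the targets recorded in `plan`, until all instructions have been used.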
while plan:
ready = [k for k, v in bots.items() if len(v) == 2]
for r in ready:
chips = bots.pop(r)
target_low, target_high = plan.pop(r)
bots.setdefault(target_low, []).append(min(chips))
bots.setdefault(target_high, []).append(max(chips))
return bots
with open('input_10.txt') as f:
out = p1(f)
print(out['output 0'][0] * out['output 1'][0] * out['output 2'][0])
|
import argparse
import gzip
import sys
def find_peaks(chrom, chunk, w, d):
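    # Approach: build a per-base coverage array for the chunk, compute a sliding
    # window mean of width w, and yield (chrom, start, end, average) for each
    # contiguous run where the windowed mean is at least d.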
# create a contiguous array from chunks
offset = chunk[0][1]
last = chunk[-1][2]
cov = [0] * (last - offset)
ave = [0] * (last - offset)
    for c, b, e, v in chunk:
        for i in range(b, e):
            cov[i - offset] = v
    if len(cov) < w: return None  # chunk is shorter than the window; nothing to report
# first window
win = cov[:w]
tot = sum(win)
ave[w//2] = tot/w
# other windows
for i in range(1, len(cov) -w):
lose = cov[i-1]
gain = cov[i+w-1]
tot -= lose
tot += gain
ave[i + w//2] = tot/w
# report peaks
beg = 0
while True:
if beg >= len(ave): break
if ave[beg] >= d:
end = beg
while True:
end += 1
                if end >= len(ave) or ave[end] < d: break
ac = sum(ave[beg:end]) / (end - beg +1)
yield (chrom, beg+offset, end+offset, ac)
beg = end + 1
else:
beg += 1
def overlap(peak, blacklist):
chrom = peak[0]
beg = peak[1]
end = peak[2]
if chrom not in blacklist: return 0
for b, e in blacklist[chrom]:
if b >= beg and b <= end: return 1
if e >= beg and e <= end: return 1
if b <= beg and e >= end: return 1
return 0
def output(peak):
print(peak[0], peak[1], peak[2], f'{peak[3]:.1f}')
parser = argparse.ArgumentParser(description='Windowing threshold finder')
parser.add_argument('bed', type=str, metavar='<bedfile>',
help='path to bed file')
parser.add_argument('--blacklist', required=False, type=str,
metavar='<path>', help='list of regions to ignore')
parser.add_argument('--window', required=False, type=int, default=100,
metavar='<int>', help='window size [%(default)i]')
parser.add_argument('--depth', required=False, type=int, default=10,
metavar='<int>', help='minimum read depth [%(default)i]')
arg = parser.parse_args()
# get blacklist if there is one
blacklist = {}
if arg.blacklist:
with open(arg.blacklist) as fp:
for line in fp.readlines():
if line.startswith('#'): continue
if line.startswith('\n'): continue
f = line.split()
chrom = f[0]
beg = int(f[1])
end = int(f[2])
if chrom not in blacklist: blacklist[chrom] = []
blacklist[chrom].append( (beg, end) )
if arg.bed.endswith('.gz'): fp = gzip.open(arg.bed, 'rt')
else: fp = open(arg.bed)
# separate into chunks of windowsize
chunk_chrom = None
chunk_start = 1
chunk_end = 1
chunk = []
while True:
line = fp.readline()
if line == '': break
if line.startswith('#'): continue
if line.startswith('\n'): continue
chrom, beg, end, cov = line.split()
cov = float(cov)
beg = int(beg)
end = int(end)
gap = beg - chunk_end
if chunk_chrom != chrom or gap > arg.window:
# finish previous chunk
if len(chunk) > 0:
            for peak in find_peaks(chunk_chrom, chunk, arg.window, arg.depth):
if peak is None: continue
if overlap(peak, blacklist): continue
output(peak)
# start new chunk
chunk_chrom = chrom
chunk_start = beg
chunk_end = end
chunk = []
chunk.append( (chrom, beg, end, cov) )
else:
chunk_end = end
chunk.append( (chrom, beg, end, cov) )
# report last chunk
for peak in find_peaks(chrom, chunk, arg.window, arg.depth):
if peak is None: continue
if overlap(peak, blacklist): continue
output(peak)
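# Example invocation (a sketch; the script and file names are placeholders):
#   python peaks.py coverage.bed --window 100 --depth 10 --blacklist blacklist.bed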
|
print("Hello, {{greeting_recipient}}!")
|
# 3-8 Seeing The World
bucket_list = ['japan', 'france', 'spain', 'america']
print(sorted(bucket_list))
print(bucket_list)
print(sorted(bucket_list))
print(bucket_list)
print(" ")
bucket_list.reverse()
print(bucket_list)
bucket_list.reverse()
print(bucket_list)
bucket_list.sort()
print(bucket_list)
bucket_list.sort()
print(bucket_list)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-11-13 21:39:26
# @Author : Yafei ([email protected])
# @Link : http://www.cnblogs.com/mar-q/
# @Version : $Id$
import os
import sys
from django.http import HttpResponse
from django.conf import settings
from django.conf.urls import url
from django.core.wsgi import get_wsgi_application
def index(req):
return HttpResponse('HelloWorld')
urlpatterns=[
url(u'^$',index),
]
DEBUG = os.environ.get('DEBUG','on')=='on'
SECRET_KEY = os.environ.get('SECRET_KEY','{{secret_key}}')
ALLOWED_HOSTS=['*']
settings.configure(
DEBUG=DEBUG,
SECRET_KEY=SECRET_KEY,
ROOT_URLCONF=__name__,
ALLOWED_HOSTS=ALLOWED_HOSTS,
MIDDLEWARE=[
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
)
application = get_wsgi_application()
if __name__ == "__main__":
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
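# Usage sketch (assuming this single-file app is saved as app.py; the name is arbitrary):
#   python app.py runserver 0.0.0.0:8000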
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from kgan.models.GAN import GAN
from kgan.layers.BatchNormalization import BatchNormalization
import tensorflow as tf
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
from tensorflow.keras.optimizers import RMSprop
import numpy as np
class WGANGP(GAN):
@classmethod
def name(cls):
return ('wgangp')
def __init__(self, input_shape, latent_dimension):
super(WGANGP, self).__init__(input_shape, latent_dimension)
self._gradient_penalty_weight = 10.
def _create_discriminator(self):
discriminator = models.Sequential(name='discriminator')
discriminator.add(
layers.Conv2D(
filters=64,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
use_bias=True,
bias_initializer=tf.keras.initializers.Constant(value=0.0),
input_shape=self.input_shape()))
discriminator.add(layers.LeakyReLU(alpha=0.2))
discriminator.add(
layers.Conv2D(
filters=128,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=0.02),
use_bias=True,
bias_initializer=tf.keras.initializers.Constant(value=0.0)))
discriminator.add(BatchNormalization(is_training=True))
discriminator.add(layers.LeakyReLU(alpha=0.2))
discriminator.add(layers.Flatten())
discriminator.add(
layers.Dense(
units=1024,
kernel_initializer=tf.keras.initializers.RandomNormal(
stddev=0.02),
bias_initializer=tf.keras.initializers.Constant(value=0.0)))
discriminator.add(BatchNormalization(is_training=True))
discriminator.add(layers.LeakyReLU(alpha=0.2))
discriminator.add(
layers.Dense(
units=1,
kernel_initializer=tf.keras.initializers.RandomNormal(
stddev=0.02),
bias_initializer=tf.keras.initializers.Constant(value=0.0)))
self._discriminator = discriminator
self._discriminator.summary()
return (True)
def _create_generator(self):
generator_shape = (7, 7, 128)
generator_size = np.prod(generator_shape)
generator = models.Sequential(name='generator')
generator.add(
layers.Dense(
units=generator_size,
kernel_initializer=tf.keras.initializers.RandomNormal(
stddev=0.02),
bias_initializer=tf.keras.initializers.Constant(value=0.0),
input_shape=(self.latent_dimension(), ),
))
generator.add(BatchNormalization(is_training=True))
generator.add(layers.ReLU())
generator.add(layers.Reshape(generator_shape))
generator.add(
layers.Conv2DTranspose(
filters=64,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(
stddev=0.02),
use_bias=True,
bias_initializer=tf.keras.initializers.Constant(value=0.0)))
generator.add(BatchNormalization(is_training=True))
generator.add(layers.ReLU())
generator.add(
layers.Conv2DTranspose(
filters=1,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
kernel_initializer=tf.keras.initializers.RandomNormal(
stddev=0.02),
use_bias=True,
bias_initializer=tf.keras.initializers.Constant(value=0.0)))
generator.add(layers.Activation(tf.keras.activations.tanh))
self._generator = generator
self._generator.summary()
return (True)
def _create_generator_optimizer(self, learning_rate):
optimizer = RMSprop(learning_rate=5 * learning_rate)
return (optimizer)
def _create_discriminator_optimizer(self, learning_rate):
optimizer = RMSprop(learning_rate=learning_rate)
return (optimizer)
def set_gradient_penalty_weight(self, gradient_penalty_weight):
self._gradient_penalty_weight = gradient_penalty_weight
def gradient_penalty_weight(self):
return (self._gradient_penalty_weight)
def _discriminator_loss(self, real_predictions, fake_predictions):
# Compute discriminator loss.
real_image_loss = -tf.reduce_mean(real_predictions)
fake_image_loss = tf.reduce_mean(fake_predictions)
discriminator_loss = real_image_loss + fake_image_loss
return (discriminator_loss)
def _generator_loss(self, fake_predictions):
# Compute generator loss.
generator_loss = -tf.reduce_mean(fake_predictions)
return (generator_loss)
def _create_generator_inputs(self, input_batch, number_of_samples):
generator_inputs = tf.random.uniform(
[number_of_samples, self.latent_dimension()], minval=-1, maxval=1)
return (generator_inputs)
def _gradient_penalty(self, real_images, fake_images):
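        # WGAN-GP penalty (Gulrajani et al., 2017): evaluate the critic on points
        # interpolated between real and fake images and penalize deviations of the
        # gradient norm at those points from 1.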
with tf.GradientTape() as gp_tape:
alpha = tf.random.uniform([self.batch_size()],
0.,
1.,
dtype=tf.float32)
alpha = tf.reshape(alpha, (-1, 1, 1, 1))
sample_images = real_images + alpha * (fake_images - real_images)
gp_tape.watch(sample_images)
sample_predictions = self._discriminator(
sample_images, training=False)
if isinstance(sample_predictions, tuple):
sample_predictions = sample_predictions[0]
gradients = gp_tape.gradient(sample_predictions, sample_images)
gradients_l2_norm = tf.sqrt(
tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
gradient_penalty = tf.reduce_mean((gradients_l2_norm - 1)**2)
return (gradient_penalty)
def _update_discriminator(self, input_batch):
real_images, _ = input_batch
# Sample random points in the latent space.
generator_inputs = self._create_generator_inputs(
input_batch, self.batch_size())
# Generate fake images using these random points.
fake_images = self._generator(generator_inputs)
# Train the discriminator.
with tf.GradientTape() as tape:
# Compute discriminator's predictions for real images.
real_predictions = self._discriminator(real_images)
# Compute discriminator's predictions for generated images.
fake_predictions = self._discriminator(fake_images)
#discriminator_loss = tf.reduce_mean(-real_predictions) + tf.reduce_mean(fake_predictions)
discriminator_loss = self._discriminator_loss(
real_predictions, fake_predictions)
# Compute gradient penalty using real and fake images.
gradient_penalty = self._gradient_penalty(real_images, fake_images)
# Update discriminator loss using gradient penalty value.
discriminator_loss = discriminator_loss + self.gradient_penalty_weight(
) * gradient_penalty
gradients_of_discriminator = tape.gradient(
discriminator_loss, self._discriminator.trainable_variables)
self._discriminator_optimizer.apply_gradients(
zip(gradients_of_discriminator,
self._discriminator.trainable_variables))
return (discriminator_loss)
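# Usage sketch (shapes assumed, e.g. MNIST-sized inputs; the training loop is
# expected to come from the GAN base class, which is not shown here):
#
#   gan = WGANGP(input_shape=(28, 28, 1), latent_dimension=100)
#   gan.set_gradient_penalty_weight(10.0)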
|
"""Define tests for the Acmeda config flow."""
import aiopulse
from asynctest.mock import patch
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.acmeda.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_HOST
from tests.common import MockConfigEntry
DUMMY_HOST1 = "127.0.0.1"
DUMMY_HOST2 = "127.0.0.2"
CONFIG = {
CONF_HOST: DUMMY_HOST1,
}
@pytest.fixture
def mock_hub_discover():
"""Mock the hub discover method."""
with patch("aiopulse.Hub.discover") as mock_discover:
yield mock_discover
@pytest.fixture
def mock_hub_run():
"""Mock the hub run method."""
with patch("aiopulse.Hub.run") as mock_run:
yield mock_run
async def async_generator(items):
"""Async yields items provided in a list."""
for item in items:
yield item
async def test_show_form_no_hubs(hass, mock_hub_discover):
"""Test that flow aborts if no hubs are discovered."""
mock_hub_discover.return_value = async_generator([])
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "all_configured"
# Check we performed the discovery
assert len(mock_hub_discover.mock_calls) == 1
async def test_show_form_one_hub(hass, mock_hub_discover, mock_hub_run):
"""Test that a config is created when one hub discovered."""
dummy_hub_1 = aiopulse.Hub(DUMMY_HOST1)
dummy_hub_1.id = "ABC123"
mock_hub_discover.return_value = async_generator([dummy_hub_1])
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == dummy_hub_1.id
assert result["result"].data == {
"host": DUMMY_HOST1,
}
# Check we performed the discovery
assert len(mock_hub_discover.mock_calls) == 1
async def test_show_form_two_hubs(hass, mock_hub_discover):
"""Test that the form is served when more than one hub discovered."""
dummy_hub_1 = aiopulse.Hub(DUMMY_HOST1)
dummy_hub_1.id = "ABC123"
    dummy_hub_2 = aiopulse.Hub(DUMMY_HOST2)
dummy_hub_2.id = "DEF456"
mock_hub_discover.return_value = async_generator([dummy_hub_1, dummy_hub_2])
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# Check we performed the discovery
assert len(mock_hub_discover.mock_calls) == 1
async def test_create_second_entry(hass, mock_hub_run, mock_hub_discover):
"""Test that a config is created when a second hub is discovered."""
dummy_hub_1 = aiopulse.Hub(DUMMY_HOST1)
dummy_hub_1.id = "ABC123"
dummy_hub_2 = aiopulse.Hub(DUMMY_HOST2)
dummy_hub_2.id = "DEF456"
mock_hub_discover.return_value = async_generator([dummy_hub_1, dummy_hub_2])
MockConfigEntry(domain=DOMAIN, unique_id=dummy_hub_1.id, data=CONFIG).add_to_hass(
hass
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == dummy_hub_2.id
assert result["result"].data == {
"host": DUMMY_HOST2,
}
async def test_already_configured(hass, mock_hub_discover):
"""Test that flow aborts when all hubs are configured."""
dummy_hub_1 = aiopulse.Hub(DUMMY_HOST1)
dummy_hub_1.id = "ABC123"
mock_hub_discover.return_value = async_generator([dummy_hub_1])
MockConfigEntry(domain=DOMAIN, unique_id=dummy_hub_1.id, data=CONFIG).add_to_hass(
hass
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "abort"
assert result["reason"] == "all_configured"
|
class Point(object):
x = None
y = None
tuple = tuple()
def __init__(self, x, y):
if x < 0: raise ValueError('Point::new - X must be positive instead of {}'.format(x))
if y < 0: raise ValueError('Point::new - Y must be positive instead of {}'.format(y))
self.x = x
self.y = y
self.tuple = (x, y)
def __str__(self):
return str([self.x, self.y])
def __repr__(self):
return str([self.x, self.y])
def __eq__(self, other):
if other is None:
return False
return self.x == other.x \
            and self.y == other.y
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for translate ops."""
import numpy as np
import pytest
import scipy
import tensorflow as tf
from PIL import Image
from tensorflow_addons.image import translate_ops
from tensorflow_addons.utils import test_utils
_DTYPES = {
tf.dtypes.uint8,
tf.dtypes.int32,
tf.dtypes.int64,
tf.dtypes.float16,
tf.dtypes.float32,
tf.dtypes.float64,
}
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_translate(dtype):
image = tf.constant(
[[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], dtype=dtype
)
translation = tf.constant([-1, -1], dtype=tf.float32)
image_translated = translate_ops.translate(image, translation)
np.testing.assert_equal(
image_translated.numpy(),
[[1, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0], [0, 0, 0, 0]],
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_translations_to_projective_transforms():
translation = tf.constant([-1, -1], dtype=tf.float32)
transform = translate_ops.translations_to_projective_transforms(translation)
np.testing.assert_equal(transform.numpy(), [[1, 0, 1, 0, 1, 1, 0, 0]])
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_translate_xy():
image = np.random.randint(low=0, high=255, size=(4, 4, 3), dtype=np.uint8)
translate = np.random.randint(low=0, high=4, size=(2,), dtype=np.uint8)
translate = tf.constant(translate)
color = tf.constant([255, 0, 255], tf.dtypes.uint8)
tf_image = tf.constant(image)
pil_image = Image.fromarray(image)
translated = translate_ops.translate_xy(
image=tf_image, translate_to=tf.constant(translate), replace=color
)
expected = pil_image.rotate(
angle=0,
resample=Image.NEAREST,
translate=tuple(translate.numpy()),
fillcolor=tuple(color.numpy()),
)
np.testing.assert_equal(translated.numpy(), expected)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES - {tf.dtypes.float16})
def test_translate_xy_scalar_replace(dtype):
image = np.random.randint(low=0, high=128, size=(4, 4, 3)).astype(
dtype.as_numpy_dtype
)
translate_to = np.random.randint(low=0, high=4, size=(2,))
result = translate_ops.translate_xy(
image=image, translate_to=translate_to, replace=1
)
expected = scipy.ndimage.shift(
input=image,
shift=(translate_to[1], translate_to[0], 0),
order=0,
mode="constant",
cval=1,
)
test_utils.assert_allclose_according_to_type(result.numpy(), expected)
|
from queue import Queue
def get_input():
    first_line = list(map(int, input().split()))
    second_line = list(map(int, input().split()))
    return first_line[0], first_line[1], second_line
def process_data(N, K, data):
    counter = 0  # running sum of the shifts applied to the previous K-1 items
    total = 0
    stack = []
    Q = Queue(maxsize=1000000)
    for i in range(len(data)):
        stack.append(data[i])
    for i in range(N):
        shift = (3 - (stack[i] + counter) % 3) % 3
        total += shift
        if not i <= K - 2:
            counter -= Q.get()
        counter += shift
        Q.put(shift)
    return total
def main():
    N, K, data = get_input()
    print(process_data(N, K, data))
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Tests teachers + agent implementations, assuming parser to conversations format has
already been done and teachers/agents already created.
`test_agents.py` includes functions for generating the raw data used in this file as
well as the data parser.
"""
import unittest
import copy
import parlai.core.tod.tod_core as tod_core
import parlai.core.tod.tod_test_utils.test_agents as test_agents
class TestTodAgentsAndTeachersBase(unittest.TestCase):
"""
Base class with convenience functions for setting up agents, dumping text, etc.
"""
def setup_agent_or_teacher(self, class_type, round_opt, opt):
full_opts = {**round_opt, **opt}
full_opts["datatype"] = "DUMMY"
full_opts["datafile"] = "DUMMY"
full_opts["episodes_randomization_seed"] = -1 # no random here
return class_type(full_opts)
def dump_single_utt_per_episode_agent_text(self, class_type, round_opt, opt):
"""
Continuously dumps data from an agent until it's done.
"""
agent = self.setup_agent_or_teacher(class_type, round_opt, opt)
result = []
while not agent.epoch_done():
result.append(agent.act()["text"])
agent.reset()
return result
def dump_teacher_text(self, class_type, round_opt, opt):
"""
Array where [episode_idx][turn_idx][text=0,label=1]
"""
teacher = self.setup_agent_or_teacher(class_type, round_opt, opt)
data = []
here = []
for x, new in teacher.setup_data("dummy"):
if new and len(here) > 0:
data.append(copy.deepcopy(here))
here = []
here.append([x["text"], x["label"]])
if len(here) > 0:
data.append(here)
return data
def _test_roundDataCorrect(self):
"""
Convenience function that runs on different episode setups.
Prefix with `_` since not all tests necessarily need this
"""
self._test_roundDataCorrect_helper(test_agents.EPISODE_SETUP__UTTERANCES_ONLY)
self._test_roundDataCorrect_helper(test_agents.EPISODE_SETUP__SINGLE_API_CALL)
self._test_roundDataCorrect_helper(test_agents.EPISODE_SETUP__MULTI_ROUND)
self._test_roundDataCorrect_helper(test_agents.EPISODE_SETUP__MULTI_EPISODE)
def _test_roundDataCorrect_helper(self, config):
"""
        Implement this in downstream classes to define what is "correct" for a round
        (i.e. checking serialization data for a given class vs only checking utterances).
"""
raise RuntimeError("Not implemented")
class TestSystemTeacher(TestTodAgentsAndTeachersBase):
def test_apiSchemas_with_yesApiSchemas(self):
"""
Tests to make sure that data from first turn is correct when we include API
Schemas.
"""
values = self.dump_teacher_text(
test_agents.SystemTeacher,
test_agents.EPISODE_SETUP__SINGLE_API_CALL,
{"api_schemas": True},
)
self.assertEqual(
values[0][0][0],
"APIS: "
+ tod_core.SerializationHelpers.list_of_maps_to_str(
test_agents.make_api_schemas_machine(2)
),
)
def test_apiSchemas_with_noApiSchemas(self):
"""
Tests to make sure that data from first turn is correct when we do not include
API Schemas.
"""
values = self.dump_teacher_text(
test_agents.SystemTeacher,
test_agents.EPISODE_SETUP__SINGLE_API_CALL,
{"api_schemas": False},
)
self.assertEqual(values[0][0][0], "APIS: ")
def _test_roundDataCorrect_helper(self, config):
max_rounds = config[test_agents.TEST_NUM_ROUNDS_OPT_KEY]
values = self.dump_teacher_text(test_agents.SystemTeacher, config, {})
for episode_idx, episode in enumerate(values):
utts = test_agents.get_round_utts(episode_idx, max_rounds)
comp = []
for utt in utts:
comp.append([utt[0], utt[1]])
comp.append([utt[2], utt[3]])
# Skip grounding turn cause we check it in the other teachers
self.assertEqual(episode[1:], comp)
def test_roundDataCorrect(self):
self._test_roundDataCorrect()
class TestUserTeacher(TestTodAgentsAndTeachersBase):
def _test_roundDataCorrect_helper(self, config):
"""
Make sure that all of the User teacher data is correct relative to ground truth,
including grounding turn.
"""
max_rounds = config[test_agents.TEST_NUM_ROUNDS_OPT_KEY]
values = self.dump_teacher_text(test_agents.UserSimulatorTeacher, config, {})
for episode_idx, episode in enumerate(values):
utts = test_agents.get_round_utts(episode_idx, max_rounds)
comp = []
comp.append(
[
"GOAL: "
+ tod_core.SerializationHelpers.list_of_maps_to_str(
test_agents.make_goal_calls_machine(max_rounds)
),
utts[0][0],
]
)
last_sys = utts[0][3]
for i in range(1, len(utts)):
comp.append([last_sys, utts[i][0]])
last_sys = utts[i][3]
self.assertEqual(episode, comp)
def test_roundDataCorrect(self):
self._test_roundDataCorrect()
class TestGoalAgent(TestTodAgentsAndTeachersBase):
def _test_roundDataCorrect_helper(self, config):
"""
Make sure goal agent data is correct with (possibly) multiple goals.
"""
max_rounds = config[test_agents.TEST_NUM_ROUNDS_OPT_KEY]
max_episodes = config[test_agents.TEST_NUM_EPISODES_OPT_KEY]
values = self.dump_single_utt_per_episode_agent_text(
test_agents.GoalAgent, config, {}
)
goal_text = [
"GOAL: "
+ tod_core.SerializationHelpers.list_of_maps_to_str(
test_agents.make_goal_calls_machine(max_rounds)
)
for _ in range(max_episodes)
]
self.assertEqual(values, goal_text)
def test_roundDataCorrect(self):
self._test_roundDataCorrect()
class TestApiSchemaAgent(TestTodAgentsAndTeachersBase):
def _test_roundDataCorrect_helper(self, config):
"""
Make sure api schema information is correct with (possibly) multiple goals.
"""
max_rounds = config[test_agents.TEST_NUM_ROUNDS_OPT_KEY]
max_episodes = config[test_agents.TEST_NUM_EPISODES_OPT_KEY]
values = self.dump_single_utt_per_episode_agent_text(
test_agents.ApiSchemaAgent, config, {}
)
apis_texts = [
"APIS: "
+ tod_core.SerializationHelpers.list_of_maps_to_str(
test_agents.make_api_schemas_machine(max_rounds)
)
for _ in range(max_episodes)
]
self.assertEqual(values, apis_texts)
def test_roundDataCorrect(self):
self._test_roundDataCorrect()
class TestSingleGoalAgent(TestTodAgentsAndTeachersBase):
def _test_roundDataCorrect_helper(self, config):
"""
Make sure single goal agent correctly splits conversations with multiple goals
into single goals for the agent.
"""
max_rounds = config[test_agents.TEST_NUM_ROUNDS_OPT_KEY]
max_episodes = config[test_agents.TEST_NUM_EPISODES_OPT_KEY]
values = self.dump_single_utt_per_episode_agent_text(
test_agents.SingleGoalAgent, config, {}
)
goal_text = []
for _ in range(max_episodes):
goals = test_agents.make_goal_calls_machine(max_rounds)
for x in goals:
goal_text.append(
"GOAL: " + tod_core.SerializationHelpers.list_of_maps_to_str([x])
)
self.assertEqual(values, goal_text)
def test_roundDataCorrect(self):
self._test_roundDataCorrect()
class TestSingleApiSchemaAgent(TestTodAgentsAndTeachersBase):
def _test_roundDataCorrect_helper(self, config):
"""
Make sure single api schema agent correctly splits conversations with multiple
goals into single goals for the agent.
"""
max_rounds = config[test_agents.TEST_NUM_ROUNDS_OPT_KEY]
max_episodes = config[test_agents.TEST_NUM_EPISODES_OPT_KEY]
values = self.dump_single_utt_per_episode_agent_text(
test_agents.SingleApiSchemaAgent, config, {}
)
apis_text = []
for _ in range(max_episodes):
apis = test_agents.make_api_schemas_machine(max_rounds)
for x in apis:
apis_text.append(
"APIS: " + tod_core.SerializationHelpers.list_of_maps_to_str([x])
)
self.assertEqual(values, apis_text)
def test_roundDataCorrect(self):
self._test_roundDataCorrect()
class TestSingleGoalWithSingleApiSchemaAgent(TestTodAgentsAndTeachersBase):
"""
Make sure the SingleGoal + SingleApiSchema agents correspond.
"""
def _test_roundDataCorrect_helper(self, config):
goals = self.dump_single_utt_per_episode_agent_text(
test_agents.SingleGoalAgent, config, {}
)
apis = self.dump_single_utt_per_episode_agent_text(
test_agents.SingleApiSchemaAgent, config, {}
)
for i in range(len(goals)):
goal = tod_core.SerializationHelpers.str_to_goals(goals[i][len("GOALS:") :])
api = tod_core.SerializationHelpers.str_to_api_schemas(
apis[i][len("APIS:") :]
)
self.assertEqual(
goal[0].get("api_name", None), api[0].get("api_name", None)
)
def test_roundDataCorrect(self):
self._test_roundDataCorrect()
class TestLowShot(TestTodAgentsAndTeachersBase):
FEW_SHOT_SAMPLES = [0, 1, 5, 15]
PERCENTAGES = [0, 0.1, 0.3, 0.5]
def setup_agent_or_teacher(self, class_type, round_opt, opt):
full_opts = {**round_opt, **opt}
full_opts["datatype"] = "DUMMY"
full_opts["datafile"] = "DUMMY"
return class_type(full_opts)
def test_few_shot_lengths_correct(self):
def helper(n_shot):
values = self.dump_teacher_text(
test_agents.SystemTeacher,
test_agents.EPISODE_SETUP__MULTI_EPISODE_BS,
{"episodes_randomization_seed": 0, "n_shot": n_shot},
)
self.assertEqual(len(values), n_shot)
for i in self.FEW_SHOT_SAMPLES:
helper(i)
def _test_subsets(self, data_dumps):
for i in range(len(data_dumps) - 1):
small = data_dumps[i]
larger = data_dumps[i + 1]
for i, episode in enumerate(small):
self.assertEqual(episode, larger[i])
def test_few_shot_subset(self):
"""
Make sure specifying few-shot by n-shot works correctly.
"""
def helper(n_shot, seed):
return self.dump_teacher_text(
test_agents.SystemTeacher,
test_agents.EPISODE_SETUP__MULTI_EPISODE,
{"episodes_randomization_seed": seed, "n_shot": n_shot},
)
data_dumps_seed_zero = [helper(i, 0) for i in self.FEW_SHOT_SAMPLES]
self._test_subsets(data_dumps_seed_zero)
data_dumps_seed_three = [helper(i, 3) for i in self.FEW_SHOT_SAMPLES]
self._test_subsets(data_dumps_seed_three)
self.assertNotEqual(data_dumps_seed_zero[-1], data_dumps_seed_three[-1])
def test_percent_shot_lengths_correct(self):
"""
Make sure specifying few-shot by percentages works correctly.
"""
def helper(percent_shot, correct):
values = self.dump_teacher_text(
test_agents.SystemTeacher,
test_agents.EPISODE_SETUP__MULTI_EPISODE_BS, # 35 episodes
{"episodes_randomization_seed": 0, "percent_shot": percent_shot},
)
self.assertEqual(len(values), correct)
helper(0, 0)
helper(0.1, 3)
helper(0.3, 10)
def test_percent_shot_subset(self):
"""
Make sure specifying few-shot by percentages works correctly.
"""
def helper(percent_shot, seed):
return self.dump_teacher_text(
test_agents.SystemTeacher,
test_agents.EPISODE_SETUP__MULTI_EPISODE_BS, # 35 episodes
{"episodes_randomization_seed": seed, "percent_shot": percent_shot},
)
data_dumps_seed_zero = [helper(i, 0) for i in self.PERCENTAGES]
self._test_subsets(data_dumps_seed_zero)
data_dumps_seed_three = [helper(i, 3) for i in self.PERCENTAGES]
self._test_subsets(data_dumps_seed_three)
def test_correct_throw_when_both_shots_defined(self):
self.assertRaises(
RuntimeError,
self.dump_teacher_text,
test_agents.SystemTeacher,
test_agents.EPISODE_SETUP__MULTI_EPISODE_BS, # 35 episodes
{"episodes_randomization_seed": 0, "percent_shot": 0.3, "n_shot": 3},
)
if __name__ == "__main__":
unittest.main()
|
"""
*Simple-Attribute-Select*
"""
|
# -*- coding: utf-8 -*-
from copy import copy
from pandas import Grouper
from shapely.geometry import LineString
from .trajectory import Trajectory
from .trajectory_collection import TrajectoryCollection
from .geometry_utils import measure_distance_spherical, measure_distance_euclidean
class TrajectorySplitter:
"""
Splitter base class
"""
def __init__(self, traj):
"""
        Create TrajectorySplitter
Parameters
----------
traj : Trajectory/TrajectoryCollection
"""
self.traj = traj
def split(self, **kwargs):
"""
Split the input Trajectory/TrajectoryCollection.
Parameters
----------
kwargs : any type
Split parameters, differs by splitter
Returns
-------
TrajectoryCollection
Split trajectories
"""
if isinstance(self.traj, Trajectory):
return self._split_traj(self.traj, **kwargs)
elif isinstance(self.traj, TrajectoryCollection):
return self._split_traj_collection(**kwargs)
else:
raise TypeError
def _split_traj_collection(self, **kwargs):
trips = []
for traj in self.traj:
for x in self._split_traj(traj, **kwargs):
if x.get_length() > self.traj.min_length:
trips.append(x)
result = copy(self.traj)
result.trajectories = trips
return result
def _split_traj(self, traj, **kwargs):
return traj
class TemporalSplitter(TrajectorySplitter):
"""
Split trajectories into subtrajectories using regular time intervals.
Parameters
----------
mode : str
Split mode
Examples
--------
>>> mpd.TemporalSplitter(traj).split(mode="year")
"""
def _split_traj(self, traj, mode='day'):
result = []
if mode == 'day':
grouped = traj.df.groupby(Grouper(freq="D"))
elif mode == 'month':
grouped = traj.df.groupby(Grouper(freq="M"))
elif mode == 'year':
grouped = traj.df.groupby(Grouper(freq="Y"))
else:
raise ValueError('Invalid split mode {}. Must be one of [day, month, year]'.format(mode))
for key, values in grouped:
if len(values) > 1:
result.append(Trajectory(values, '{}_{}'.format(traj.id, key)))
return TrajectoryCollection(result)
class ObservationGapSplitter(TrajectorySplitter):
"""
Split trajectories into subtrajectories whenever there is a gap in the observations.
Parameters
----------
gap : datetime.timedelta
Time gap threshold
Examples
--------
>>> mpd.ObservationGapSplitter(traj).split(gap=timedelta(hours=1))
"""
def _split_traj(self, traj, gap):
result = []
temp_df = traj.df.copy()
temp_df['t'] = temp_df.index
temp_df['gap'] = temp_df['t'].diff() > gap
temp_df['gap'] = temp_df['gap'].apply(lambda x: 1 if x else 0).cumsum()
dfs = [group[1] for group in temp_df.groupby(temp_df['gap'])]
for i, df in enumerate(dfs):
df = df.drop(columns=['t', 'gap'])
if len(df) > 1:
result.append(Trajectory(df, '{}_{}'.format(traj.id, i)))
return TrajectoryCollection(result)
class SpeedSplitter(TrajectorySplitter):
"""
Split trajectories if there are no speed measurements above the speed limit for the specified duration.
Parameters
----------
speed : float
Speed limit
duration : datetime.timedelta
Minimum stop duration
Examples
--------
>>> mpd.SpeedSplitter(traj).split(speed=10, duration=timedelta(minutes=5))
"""
def _split_traj(self, traj, speed, duration):
traj = traj.copy()
speed_col_name = traj.get_speed_column_name()
if speed_col_name not in traj.df.columns:
traj.add_speed(overwrite=True)
traj.df = traj.df[traj.df[speed_col_name] >= speed]
return ObservationGapSplitter(traj).split(gap=duration)
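# ---------------------------------------------------------------------------
# Minimal usage sketch (assumption for illustration only: `gdf` is a point
# GeoDataFrame with a DatetimeIndex, as expected by Trajectory). It mirrors
# the doctest-style examples in the class docstrings above.
#
# >>> from datetime import timedelta
# >>> traj = Trajectory(gdf, 1)
# >>> daily = TemporalSplitter(traj).split(mode='day')
# >>> trips = ObservationGapSplitter(traj).split(gap=timedelta(minutes=15))
# >>> moving = SpeedSplitter(traj).split(speed=1.0, duration=timedelta(minutes=5))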
|
# The following comments couldn't be translated into the new config version:
#,
import FWCore.ParameterSet.Config as cms
#------------------
# clustering:
#------------------
# BasicCluster producer
from RecoEcal.EgammaClusterProducers.cosmicBasicClusters_cfi import *
# SuperCluster producer
from RecoEcal.EgammaClusterProducers.cosmicSuperClusters_cfi import *
# SuperCluster with Preshower producer
#include "RecoEcal/EgammaClusterProducers/data/SuperClustersWithPreshower.cfi"
# create sequence for clustering
cosmicClusteringTask = cms.Task(cosmicBasicClusters, cosmicSuperClusters)
cosmicClusteringSequence = cms.Sequence(cosmicClusteringTask)
|
import os
# test on CPU
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["JAX_PLATFORMS"] = "cpu"
|
"""Implemented arrival classes for different distributions"""
from abc import abstractmethod
from math import exp
from nc_arrivals.arrival import Arrival
class ArrivalDistribution(Arrival):
"""Abstract class for arrival processes that are of
a distinct distribution."""
@abstractmethod
def sigma(self, theta: float) -> float:
"""
sigma(theta)
:param theta: mgf parameter
"""
pass
@abstractmethod
def rho(self, theta: float) -> float:
"""
rho(theta)
:param theta: mgf parameter
"""
pass
@abstractmethod
def is_discrete(self) -> bool:
"""
:return True if the arrival distribution is discrete, False if not
"""
pass
@abstractmethod
def average_rate(self) -> float:
pass
def to_name(self) -> str:
return self.__class__.__name__
def transient_bound(self, theta: float, delta_time: int) -> float:
if delta_time < 0:
            raise ValueError(f"delta_time = {delta_time} must be non-negative")
return exp(
theta *
(self.rho(theta=theta) * delta_time + self.sigma(theta=theta)))
@abstractmethod
def to_value(self, number=1, show_m=False) -> str:
pass
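
# Minimal illustrative subclass -- not part of the library; the class name and
# constructor are assumptions for this sketch only (and it presumes the Arrival
# base class adds no further abstract methods). It models a deterministically
# bounded arrival with sigma(theta) = burst and rho(theta) = rate, both
# independent of theta, so transient_bound() evaluates to
# exp(theta * (rate * delta_time + burst)).
class DeterministicArrival(ArrivalDistribution):
    """Sketch of an arrival curve rate * delta_time + burst."""

    def __init__(self, rate: float, burst: float) -> None:
        self.rate = rate
        self.burst = burst

    def sigma(self, theta: float) -> float:
        return self.burst

    def rho(self, theta: float) -> float:
        return self.rate

    def is_discrete(self) -> bool:
        return True

    def average_rate(self) -> float:
        return self.rate

    def to_value(self, number=1, show_m=False) -> str:
        return f"rate={self.rate}_burst={self.burst}"

# Usage sketch:
#   DeterministicArrival(rate=2.0, burst=1.0).transient_bound(theta=0.5, delta_time=4)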
|
import socket
from socketWrapper import *
import json
from handleMessages import HandleMessages
IP = "127.0.0.1"
PORT = 2020
class SocketConsumer:
def __init__(self, _id_consumer):
self._id_consumer = _id_consumer
self._receiver_socket = None
self._socket_wrapper = None
self._handle_messages = HandleMessages(_id_consumer)
self.open_socket()
def open_socket(self):
self._receiver_socket = socket.socket()
_flag = True
while _flag:
try:
self._receiver_socket.connect((IP, PORT))
print(f"the sender socket is up and connected to receiver{self._id_consumer}.")
_flag = False
except Exception:
print("the sender socket is not up still.")
self._socket_wrapper = SocketWrapper(self._receiver_socket)
def close_socket(self):
self._receiver_socket.close()
def consume_data(self):
data = json.loads(self._socket_wrapper.read_with_len())
self._handle_messages.handler(data)
def run(self):
non_stop = True
        # Continuously consume image files from the sender.
while non_stop:
self.consume_data()
self.close_socket()
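
# Minimal usage sketch (assumption: a producer is already listening on IP:PORT
# and sending length-prefixed JSON messages that HandleMessages understands).
if __name__ == "__main__":
    consumer = SocketConsumer(1)
    consumer.run()  # blocks, repeatedly calling consume_data()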
|
from __future__ import print_function, division, absolute_import
from time import sleep
from distributed.system_monitor import SystemMonitor
def test_SystemMonitor():
sm = SystemMonitor()
a = sm.update()
sleep(0.01)
b = sm.update()
assert sm.cpu
assert sm.memory
assert set(a) == set(b)
assert all(rb >= 0 for rb in sm.read_bytes)
assert all(wb >= 0 for wb in sm.write_bytes)
assert all(len(q) == 3 for q in sm.quantities.values())
assert 'cpu' in repr(sm)
def test_count():
sm = SystemMonitor(n=5)
assert sm.count == 1
sm.update()
assert sm.count == 2
for i in range(10):
sm.update()
assert sm.count == 12
for v in sm.quantities.values():
assert len(v) == 5
def test_range_query():
sm = SystemMonitor(n=5)
assert all(len(v) == 1 for v in sm.range_query(0).values())
assert all(len(v) == 0 for v in sm.range_query(123).values())
sm.update()
sm.update()
sm.update()
assert all(len(v) == 4 for v in sm.range_query(0).values())
assert all(len(v) == 3 for v in sm.range_query(1).values())
for i in range(10):
sm.update()
assert all(len(v) == 4 for v in sm.range_query(10).values())
assert all(len(v) == 5 for v in sm.range_query(0).values())
|
"""Dropping old integration with readability left overs
Revision ID: f4543055e780
Revises: f5978c8a8740
Create Date: 2020-11-08 20:28:31.145702
"""
import logging
import sqlalchemy as sa
from alembic import op
logger = logging.getLogger(__name__)
revision = 'f4543055e780'
down_revision = 'f5978c8a8740'
branch_labels = None
depends_on = None
def upgrade():
logger.info('dropping readability_key column from user')
op.drop_column('user', 'readability_key')
def downgrade():
op.add_column('user', sa.Column('readability_key',
sa.String(), nullable=True))
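
# Applying this revision with the standard Alembic CLI (run from the project root):
#   alembic upgrade f4543055e780
#   alembic downgrade f5978c8a8740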
|
# =====================================================================================================================
#
# Tensor Abstraction Layer Objects 0.1.0-ALPHA
# MACHINE LEARNING EXPERIMENTS
#
# Framework design by Pantelis I. Kaplanoglou
# Licensed under the MIT License
#
# =====================================================================================================================
import tensorflow as tf
import os
import sys
import json
import random
import numpy as np
import TALOS.Constants as tcc
from TALOS.Core import Logger, MinuteUID, SysParams, Exec, ERLString
from TALOS.Utils import OperatingSystemSignature
from TALOS.FileSystem import Storage, BaseFolders
from TALOS.Metrics import ClassificationMetrics, ClassificationBest
from TALOS.HyperParams import NNLearnConfig
#------------------------------------------------------------------------------------
def GetModelAndFoldCommandArguments(p_sDefaultModelName=None, p_nDefaultFoldNumber=10, p_nDefaultIsEvaluating=False):
sModelName=p_sDefaultModelName
nFoldNumber=p_nDefaultFoldNumber
bIsEvaluating=p_nDefaultIsEvaluating
if len(sys.argv) > 1:
sModelName=sys.argv[1]
if len(sys.argv) > 2:
nFoldNumber=int(sys.argv[2])
if len(sys.argv) > 3:
bIsEvaluating=(int(sys.argv[3]) == 1)
return sModelName, nFoldNumber, bIsEvaluating
#------------------------------------------------------------------------------------
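# Usage sketch (hypothetical script name and argument values, for illustration only):
#   python train.py MyModel 3 1
# is parsed by GetModelAndFoldCommandArguments() as:
#   sModelName = "MyModel", nFoldNumber = 3, bIsEvaluating = True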
#==================================================================================================
class MonitoringSettings(object):
#------------------------------------------------------------------------------------
def __init__(self, p_sFileName):
#........ | Instance Attributes | ..............................................
self.FileName = p_sFileName
self.ModelsToCompare=None
self.ModelTitles=None
self.ModelBaseFolders=None
#................................................................................
self.Load()
#------------------------------------------------------------------------------------
def Load(self):
if self.FileName is not None:
with open(self.FileName) as oDefFile:
dJSON = json.load(oDefFile)
self.ModelsToCompare=[]
self.ModelTitles=[]
self.ModelBaseFolders=[]
oModels=dJSON["CompareModels"]
for oModel in oModels:
print("Adding to comparison:", oModel[0], ":", "'%s'" % oModel[1])
self.ModelsToCompare.append(oModel[0])
self.ModelTitles.append(oModel[1])
self.ModelBaseFolders.append(oModel[2])
#------------------------------------------------------------------------------------
#==================================================================================================
#==================================================================================================
class StatsColumnType(object):
VALUE = 0
DELTA_VALUE = 1
ARCTAN_DELTA_VALUE = 2
COUNT = 3
@classmethod
def ToString(cls, p_sBaseDescr, p_nType):
p_sBaseDescr = p_sBaseDescr.replace(" ", "")
if p_nType == StatsColumnType.VALUE:
return p_sBaseDescr
elif p_nType == StatsColumnType.DELTA_VALUE:
return "Δ%s" % p_sBaseDescr
elif p_nType == StatsColumnType.ARCTAN_DELTA_VALUE:
return "ArcTan(Δ%s)" % p_sBaseDescr
return
#==================================================================================================
#TODO: 0.7 Setup file for testing different DNAs
#==================================================================================================
class ExperimentSubFolder(object):
NO_SUBFOLDERS="{None}"
#------------------------------------------------------------------------------------
def __init__(self, p_oParent, p_nFoldNumber, p_bIsRun=False):
#........ | Instance Attributes | ..............................................
self.ParentExperiment = p_oParent
self.FoldNumber = p_nFoldNumber
self.IsRun = p_bIsRun
if self.IsRun:
self.Folder = os.path.join(self.ParentExperiment.RunBaseFolder, "fold%.2d" % self.FoldNumber )
else:
self.Folder = os.path.join(self.ParentExperiment.BaseFolder , "fold%.2d" % self.FoldNumber )
Storage.EnsurePathExists(self.Folder)
sFolders = Storage.GetDirectoriesSorted(self.Folder)
if len(sFolders) > 0:
self.LastUID = sFolders[-1]
else:
self.LastUID = ExperimentSubFolder.NO_SUBFOLDERS
self.__pathsToEnsure=None
#self.__initExperimentFolder(self.LastUID)
#................................................................................
#------------------------------------------------------------------------------------
def __defineExperimentFolders(self, p_sUID):
self.__pathsToEnsure=[]
self.ExperimentFolder = os.path.join(self.Folder, p_sUID)
self.ExperimentLogFolder = os.path.join(self.ExperimentFolder, "log")
self.ExperimentStatsFolder = os.path.join(self.ExperimentFolder, "stats")
self.ExperimentResultsFolder = os.path.join(self.ExperimentFolder, "results")
self.ExperimentModelFolder = os.path.join(self.ExperimentFolder, "models")
self.ExperimentGraphsFolder = os.path.join(self.ExperimentFolder, "graphs")
self.ExperimentPlotFolder = os.path.join(self.ExperimentGraphsFolder, "plots")
self.ExperimentVisualsFolder = os.path.join(self.ExperimentFolder, "visuals")
self.ExperimentConfigFolder = os.path.join(self.ExperimentFolder, "config")
self.ExperimentBestsFolder = os.path.join(self.ExperimentFolder, "best")
self.ExperimentLogSamplesFolder = os.path.join(self.ExperimentLogFolder, "samples")
assert self.ParentExperiment.ModelName is not None, "Experiment model architecture is not defined"
self.ArchitectureCommonFolder = os.path.join(self.ParentExperiment.RunBaseFolder, "common")
self.__pathsToEnsure.append(self.ExperimentFolder)
self.__pathsToEnsure.append(self.ExperimentLogFolder)
self.__pathsToEnsure.append(self.ExperimentStatsFolder)
self.__pathsToEnsure.append(self.ExperimentResultsFolder)
self.__pathsToEnsure.append(self.ExperimentModelFolder)
self.__pathsToEnsure.append(self.ExperimentGraphsFolder)
self.__pathsToEnsure.append(self.ExperimentPlotFolder)
self.__pathsToEnsure.append(self.ExperimentVisualsFolder)
self.__pathsToEnsure.append(self.ExperimentConfigFolder)
self.__pathsToEnsure.append(self.ExperimentBestsFolder)
self.__pathsToEnsure.append(self.ExperimentLogSamplesFolder)
self.__pathsToEnsure.append(self.ArchitectureCommonFolder)
# // Saved model file system names \\
self.CommonInitialModelFileName = os.path.join(self.Folder , "init.nn")
self.InitialModelFileName = os.path.join(self.ExperimentModelFolder , "init_%s.nn" % p_sUID)
self.InitialModelZipFileName = os.path.join(self.ArchitectureCommonFolder , "initial-model_%s.zip" % p_sUID)
self.ModelFolderTemplate = os.path.join(self.ExperimentModelFolder , "%.3d" )
self.ModelFileNameTemplate = os.path.join(self.ModelFolderTemplate , "model_%s.nn" % p_sUID)
self.BestModelFileName = os.path.join(self.ExperimentBestsFolder , "best_%s.nn" % p_sUID)
self.InfoFileName = os.path.join(self.ExperimentFolder , "info.dat")
# // Evaluation results file system names \\
self.ModelResultsFileNameTemplate = os.path.join(self.ExperimentResultsFolder , "%.3d.pkl")
self.BestModelResultsFileName = os.path.join(self.ExperimentBestsFolder , "best_%s.pkl" % p_sUID)
self.BestModelTextFileName = os.path.join(self.ExperimentBestsFolder , "best_%s.txt" % p_sUID)
#self.BestEvaluationFileName = os.path.join(self.ExperimentBestsFolder , "best_evaluation_%s.csv" % p_sUID)
self.NetworkArchitectureFileName = os.path.join(self.ExperimentConfigFolder , "architecture_%s.csv" % p_sUID)
self.LogFileName = os.path.join(self.ExperimentLogFolder , "log_%s.txt" % p_sUID)
self.StatsFileName = os.path.join(self.ExperimentStatsFolder , "stats_%s.dat" % p_sUID)
self.StatsTempFileName = os.path.join(self.ExperimentStatsFolder , "stats_%s.tmp" % p_sUID)
self.MarginSamplesTemplate = os.path.join(self.ExperimentLogSamplesFolder , "sample-margins-%.3d.txt")
self.MarginHistogramTemplate = os.path.join(self.ExperimentGraphsFolder , "histogram-margins-%.3d.png")
self.LearnConfigFileName = os.path.join(self.ExperimentConfigFolder , "learn-config-source.cfg")
self.LearnConfigUsedFileName = os.path.join(self.ExperimentConfigFolder , "learn-config-used-%s.cfg" % p_sUID)
#------------------------------------------------------------------------------------
def Initialize(self, p_sUID, p_bIsEnsuringPaths=True):
self.__defineExperimentFolders(p_sUID)
if p_bIsEnsuringPaths:
for sFolder in self.__pathsToEnsure:
Storage.EnsurePathExists(sFolder)
#------------------------------------------------------------------------------------
def ListSavedResults(self):
if Storage.IsExistingPath(self.ExperimentResultsFolder):
sModelResultFiles = Storage.GetFilesSorted(self.ExperimentResultsFolder)
oModelResults = []
for sResultFile in sModelResultFiles:
_, sFileName, _ = Storage.SplitFileName(sResultFile)
nEpochNumber = int(sFileName)
oModelResults.append([nEpochNumber, sResultFile, None])
return oModelResults
#------------------------------------------------------------------------------------
def ListSavedModels(self):
sModelFolders = []
if Storage.IsExistingPath(self.ExperimentModelFolder):
if not Storage.IsFolderEmpty(self.ExperimentModelFolder):
sModelFolders = Storage.GetDirectoriesSorted(self.ExperimentModelFolder)
oModels = []
for sModel in sModelFolders:
sFolder = Storage.JoinPath(self.ExperimentModelFolder, sModel)
sModelFiles = Storage.GetFilesSorted(sFolder)
nEpochNumber = int(sModel)
oModels.append([nEpochNumber, sFolder, sModelFiles])
return oModels
#------------------------------------------------------------------------------------
def ListCompressedModels(self):
sResult = []
if Storage.IsExistingPath(self.ExperimentModelFolder):
sModelZipFiles = Storage.GetFilesSorted(self.ExperimentModelFolder)
for sZipFile in sModelZipFiles:
sZipFile = Storage.JoinPath(self.ExperimentModelFolder, sZipFile)
sResult.append(sZipFile)
return sResult
#------------------------------------------------------------------------------------
def DiscardModels(self, p_nEpochNumbers):
for nEpochNumberToDelete in p_nEpochNumbers:
self.DeleteSavedModel(nEpochNumberToDelete)
#------------------------------------------------------------------------------------
def CompressModels(self, p_nEpochNumbers):
sUID = self.ParentExperiment.MinuteUID.UID
for nEpochToCompress in p_nEpochNumbers:
sModelFolder = self.ModelFolderTemplate % nEpochToCompress
bContinueToDelete, sArchiveName = Storage.CompressFolder(sModelFolder, "model_%s_epoch_%.3d.zip" % (sUID, nEpochToCompress))
if bContinueToDelete:
bContinueToDelete = Storage.IsExistingFile(sArchiveName)
if bContinueToDelete:
self.DeleteSavedModel(nEpochToCompress)
#------------------------------------------------------------------------------------
def DeleteSavedModel(self, p_nEpochNumber):
sModelFolder = self.ModelFolderTemplate % p_nEpochNumber
Storage.RemoveFolder(sModelFolder)
#------------------------------------------------------------------------------------
#==================================================================================================
#==================================================================================================
class ExperimentQueueSystem(object):
#------------------------------------------------------------------------------------
def __init__(self):
#....................... | Instance Attributes | ...............................
self.BaseFolder = BaseFolders.EXPERIMENTS_SYSTEM
self.ToEvaluteFolder = Storage.JoinPath(self.BaseFolder , "toevaluate")
self.PendingFolder = Storage.JoinPath(self.BaseFolder , "pending")
self.ArchiveFolder = Storage.JoinPath(self.BaseFolder , "archive")
self.ErrorFolder = Storage.JoinPath(self.PendingFolder , "errors")
self.EditFolder = Storage.JoinPath(self.BaseFolder , "edit")
self.RecombineFolder = Storage.JoinPath(self.BaseFolder , "recombine")
self.CountersFileName = Storage.JoinPath(self.BaseFolder, "counters")
self.TemplateName = None
self.TemplateConfigFileName = None
#................................................................................
Storage.EnsurePathExists(self.BaseFolder)
Storage.EnsurePathExists(self.PendingFolder)
Storage.EnsurePathExists(self.ArchiveFolder)
Storage.EnsurePathExists(self.ErrorFolder)
Storage.EnsurePathExists(self.EditFolder)
Storage.EnsurePathExists(self.ToEvaluteFolder)
Storage.EnsurePathExists(self.RecombineFolder)
self.SetTemplateName(None)
#------------------------------------------------------------------------------------
def LoopExecution(self, p_sExecutableName):
bContinue = True
while bContinue and (self.GetNextConfig() is not None):
oConfig = self.LoadNextConfig()
print("[TALOS]: Next experiment in queue:%s" % oConfig.FileName)
nResult = Exec.Python(p_sExecutableName, [oConfig.Architecture, oConfig.DataSetName, str(oConfig.FoldNumber)])
bContinue = (nResult == 0)
if not bContinue:
print("[TALOS]: Error occured:%s" % oConfig.FileName)
self.ArchiveConfigAsError(oConfig.FileName)
#------------------------------------------------------------------------------------
def SetTemplateName(self, p_sName):
if p_sName is not None:
self.TemplateName = p_sName
else:
self.TemplateName = "template.cfg"
self.TemplateConfigFileName = Storage.JoinPath(self.EditFolder, self.TemplateName)
#------------------------------------------------------------------------------------
def __sequenceLearningRate(self, p_sSourceFileName, p_nLearningRateSequence, p_nCounter):
print(" -> Sequencing learning rates from template")
sResult=[]
nCounter = p_nCounter
for nLearningRate in p_nLearningRateSequence:
oNewConfig = NNLearnConfig()
oNewConfig.LoadFromFile(p_sSourceFileName)
oNewConfig.Learn.LearningRate = nLearningRate
_, sName, _ = Storage.SplitFileName(p_sSourceFileName)
sDestFileName = Storage.JoinFileName(self.PendingFolder, "%.3d-" % p_nCounter + sName + "-lr%.6f" % nLearningRate, ".cfg")
sResult.append(sDestFileName)
oNewConfig.SaveToFile(sDestFileName)
nCounter += 1
return nCounter, sResult
#------------------------------------------------------------------------------------
def __sequenceFoldNumber(self, p_sSourceFileName, p_nFoldSequence, p_nCounter):
print(" -> Sequencing fold numbers from template")
sResult=[]
nCounter = p_nCounter
for nFoldNumber in p_nFoldSequence:
oNewConfig = NNLearnConfig()
oNewConfig.LoadFromFile(p_sSourceFileName)
oNewConfig.FoldNumber = nFoldNumber
_, sName, _ = Storage.SplitFileName(p_sSourceFileName)
sDestFileName = Storage.JoinFileName(self.PendingFolder, "%.3d-" % p_nCounter + sName + "-fold%d" % nFoldNumber, ".cfg")
sResult.append(sDestFileName)
oNewConfig.SaveToFile(sDestFileName)
nCounter += 1
return nCounter, sResult
#------------------------------------------------------------------------------------
def __readCounter(self):
""" Gets the current run/evaluation counter """
self.Counter = Storage.DeserializeObjectFromFile(self.CountersFileName)
if self.Counter is None:
self.Counter = {"FormatVersion":"TALOS10", "RunCounter": 1}
nCounter = 1
else:
nCounter = self.Counter["RunCounter"]
return nCounter
#------------------------------------------------------------------------------------
def __writeCounter(self, p_nNumber):
self.Counter["RunCounter"] = p_nNumber
Storage.SerializeObjectToFile(self.CountersFileName, self.Counter, True)
#------------------------------------------------------------------------------------
def IncCounter(self):
nCounter = self.__readCounter()
nCounter += 1
self.__writeCounter(nCounter)
return nCounter
#------------------------------------------------------------------------------------
def AddConfig(self, p_sConfigFileName=None):
if p_sConfigFileName is None:
sSourceFileName = self.TemplateConfigFileName
else:
sSourceFileName = p_sConfigFileName
_, sName, _ = Storage.SplitFileName(sSourceFileName)
# Gets the current run/evaluation counter
nCounter = self.__readCounter()
oConfig = NNLearnConfig()
oConfig.LoadFromFile(sSourceFileName)
sDestFileNames = None
if oConfig.LearningRateSequence is not None:
nCounter, sDestFileNames = self.__sequenceLearningRate(sSourceFileName, oConfig.LearningRateSequence, nCounter)
elif oConfig.FoldSequence is not None:
nCounter, sDestFileNames = self.__sequenceFoldNumber(sSourceFileName, oConfig.FoldSequence, nCounter)
# for nFoldNumber in oConfig.FoldSequence:
# oNewConfig = NNLearnConfig()
# oNewConfig.LoadFromFile(sSourceFileName)
# oNewConfig.FoldNumber = nFoldNumber
# sDestFileName = Storage.JoinFileName(self.PendingFolder, "%.3d-" % nCounter + sName, ".cfg")
# oNewConfig.SaveToFile(sDestFileName)
# nCounter += 1
else:
sDestFileNames = [Storage.JoinFileName(self.PendingFolder, "%.3d-" % nCounter + sName , ".cfg")]
Storage.CopyFile(sSourceFileName, sDestFileNames[0])
nCounter += 1
# Saves the current run/evaluation counter
        self.__writeCounter(nCounter)
return sDestFileNames
#------------------------------------------------------------------------------------
def GetNextConfig(self):
# By priority first evaluates models to save disk space and then start training
sResult = self.GetNextConfigToEvaluate()
if sResult is None:
sFiles = Storage.GetFilesSorted(self.PendingFolder)
sConfigFiles = []
for sFile in sFiles:
_, _, sExt = Storage.SplitFileName(sFile)
if sExt == ".cfg":
sConfigFiles.append(Storage.JoinPath(self.PendingFolder, sFile))
if len(sFiles) > 0:
sResult = sConfigFiles[0]
else:
sResult = None
return sResult
#------------------------------------------------------------------------------------
def GetNextConfigToEvaluate(self):
sFiles = Storage.GetFilesSorted(self.ToEvaluteFolder)
sConfigFiles = []
for sFile in sFiles:
_, _, sExt = Storage.SplitFileName(sFile)
if sExt == ".cfg":
sConfigFiles.append(Storage.JoinPath(self.ToEvaluteFolder, sFile))
if len(sFiles) > 0:
sResult = sConfigFiles[0]
else:
sResult = None
return sResult
#------------------------------------------------------------------------------------
def EnsureConfig(self, p_sTemplateName):
# Gets the next configuration, or copies from the current template file
sConfigFileName = self.GetNextConfig()
if sConfigFileName is None:
# Sets the current configuration template file
self.SetTemplateName(p_sTemplateName)
self.AddConfig()
#------------------------------------------------------------------------------------
def LoadNextConfig(self):
oConfig = None
sNextConfigFileName = self.GetNextConfig()
if sNextConfigFileName is not None:
oConfig = NNLearnConfig()
oConfig.LoadFromFile(sNextConfigFileName)
# Supports the evaluation queue
if sNextConfigFileName == self.GetNextConfigToEvaluate():
oConfig.ParseUID()
oConfig.IsTraining = False
oConfig.IsEvaluating = True
oConfig.IsDeterminingBest = True
return oConfig
#------------------------------------------------------------------------------------
def ArchiveConfig(self, p_sConfigFileName=None, p_sDestFileName=None):
if p_sConfigFileName is None:
p_sConfigFileName = self.GetNextConfig()
Storage.MoveFileToFolder(p_sConfigFileName, self.ArchiveFolder, p_sDestFileName)
#------------------------------------------------------------------------------------
def ArchiveConfigAsError(self, p_sConfigFileName=None):
if p_sConfigFileName is None:
p_sConfigFileName = self.GetNextConfig()
Storage.MoveFileToFolder(p_sConfigFileName, self.ErrorFolder)
#------------------------------------------------------------------------------------
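# Usage sketch (hypothetical executable name; the methods are the ones defined
# in ExperimentQueueSystem above): keep consuming pending .cfg files until the
# queue is empty, archiving configs that fail as errors.
#
#   oQueue = ExperimentQueueSystem()
#   oQueue.EnsureConfig("template.cfg")      # seed from the edit/ template if the queue is empty
#   oQueue.LoopExecution("train_model.py")   # runs Exec.Python once per pending config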
#==================================================================================================
#==================================================================================================
class ExperimentFolder(object):
__verboseLevel = 2
#------------------------------------------------------------------------------------
@classmethod
def SplitExperimentCode(cls, p_sExperimentCode):
sParts = p_sExperimentCode.split("/")
return int(sParts[0]), sParts[1]
#------------------------------------------------------------------------------------
@classmethod
def GetExperimentName(cls, p_sModelName, p_sDataSetName=None):
if p_sDataSetName is None:
sResult = p_sModelName
else:
sResult = p_sDataSetName + "-" + p_sModelName
return sResult
#------------------------------------------------------------------------------------
@classmethod
def GetLastRunConfig(cls, p_sModelName, p_sDataSetName, p_nFoldNumber):
oExp = ExperimentFolder(p_sModelName, p_sDataSetName)
oExp.__setFoldNumber(p_nFoldNumber)
oExp.RunSub.Initialize(oExp.RunSub.LastUID, p_bIsEnsuringPaths=False)
return oExp.RunSub.LearnConfigUsedFileName
#------------------------------------------------------------------------------------
@classmethod
def GetExperiment(cls, p_sFolder, p_sCustomBaseFolder=None):
oResult = None
oConfig = NNLearnConfig.GetConfig(p_sFolder)
if oConfig is not None:
oResult = ExperimentFolder(p_oLearnConfig=oConfig)
if p_sCustomBaseFolder is not None:
oResult.RunBaseFolder = p_sCustomBaseFolder
assert oConfig.SavedExperimentUID is not None
oResult.Open(oConfig.FoldNumber, oConfig.SavedExperimentUID, p_bIsVerbose=False)
oResult.LearnConfig = oConfig
return oResult
#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
def __init__(self, p_sModelName=None, p_sDataSetName=None, p_nBatchSize=15, p_oLearnConfig=None):
#........ | Instance Attributes | ..............................................
self.ERL = None
self.LearnConfig = p_oLearnConfig
if self.LearnConfig is not None:
p_sModelName = p_oLearnConfig.Architecture
p_sDataSetName = p_oLearnConfig.DataSetName
p_nBatchSize = p_oLearnConfig.BatchSize
self.ModelName = p_sModelName
self.DataSetName = p_sDataSetName
self.Name = ExperimentFolder.GetExperimentName(self.ModelName, self.DataSetName)
self.Code = None
# if self.DataSetName is None:
# self.Name = p_sModelName
# else :
# self.Name = self.DataSetName + "-" + p_sModelName
self.BatchSize = p_nBatchSize
assert self.Name is not None, "Experiment name is not provided"
self.BaseFolder = os.path.join(BaseFolders.EXPERIMENTS_STORE, self.Name)
self.RunBaseFolder = os.path.join(BaseFolders.EXPERIMENTS_RUN, self.Name)
# // Control Variables \\
self.RandomSeed = 2017
self.FoldNumber = None
self.MaxDiskSpaceForModels = 30 #GB
# // Composite Objects \\
self.Log = Logger()
self.MinuteUID=MinuteUID()
self.IsNew = True
self.StoreSub=None
self.RunSub=None
# // System \\
self.OSSignature = OperatingSystemSignature()
self.TensorflowVersion = tf.VERSION
#................................................................................
#Storage.EnsurePathExists(self.BaseFolder)
#Storage.EnsurePathExists(self.RunBaseFolder)
#------------------------------------------------------------------------------------
@property
def UID(self):
return self.MinuteUID.UID
#------------------------------------------------------------------------------------
@UID.setter
def UID(self, p_UID):
self.MinuteUID.UID = p_UID
#------------------------------------------------------------------------------------
def GetCode(self):
return "%d/%s" % (self.FoldNumber, self.MinuteUID.UID)
#------------------------------------------------------------------------------------
def GetERLString(self):
sERLString = "%s:%s|%d/%s" % ( self.DataSetName,
self.ModelName,
self.FoldNumber,
self.MinuteUID.UID )
return sERLString
#------------------------------------------------------------------------------------
def GetDataSetFolder(self, p_sSubFolder):
return Storage.JoinPath(BaseFolders.DATASETS, p_sSubFolder)
#------------------------------------------------------------------------------------
def __setFoldNumber(self, p_nFoldNumber):
self.FoldNumber = p_nFoldNumber
self.StoreSub = ExperimentSubFolder(self, self.FoldNumber, False)
self.RunSub = ExperimentSubFolder(self, self.FoldNumber, True)
#------------------------------------------------------------------------------------
def __determineInitialModelUID(self):
sFiles = Storage.GetFilesSorted(self.RunSub.ArchitectureCommonFolder)
sUID = None
for sFile in sFiles:
if sFile.startswith("initial-model_"):
_, sName, _ = Storage.SplitFileName(sFile)
sUID = sName[-12:]
        # By convention fold number 1 is used; the UID of the last initial model saved in the common folder is returned
return sUID
#------------------------------------------------------------------------------------
def GetInitialExperiment(self):
assert self.LearnConfig is not None, "Method requires a learn configuration."
oInitialExperiment=None
if self.LearnConfig.InitExperimentCode is not None:
if self.LearnConfig.InitExperimentCode == "=":
sInitExperimentUID = self.__determineInitialModelUID()
else:
sInitExperimentUID = self.LearnConfig.InitExperimentUID
            # If an automatic initial experiment is used, the initial experiment UID is None for the first experiment.
if sInitExperimentUID is not None:
oInitialExperiment = ExperimentFolder(self.LearnConfig.Architecture, self.LearnConfig.DataSetName, self.LearnConfig.BatchSize)
oInitialExperiment.Open(self.LearnConfig.InitExperimentFoldNumber, sInitExperimentUID)
return oInitialExperiment
#------------------------------------------------------------------------------------
def Activate(self):
""" Returns
True : If a new experiment folder is created and the configuration was copied there
False: If an existing experiment folder is reused
"""
assert self.LearnConfig is not None, "Method requires a learn configuration."
if self.LearnConfig.SavedExperimentUID is not None:
self.Open(self.LearnConfig.FoldNumber, self.LearnConfig.SavedExperimentUID)
bMustArchive = False
else:
if self.LearnConfig.IsTraining:
self.Begin()
# Copies the source configuration file to the experiment subfolder "config"
Storage.CopyFile(self.LearnConfig.FileName, self.RunSub.LearnConfigFileName, True)
bMustArchive = True
else:
self.Open()
bMustArchive = False
return bMustArchive
#------------------------------------------------------------------------------------
def OpenERL(self, p_sERL=None, p_sERLString=None):
if p_sERLString is not None:
self.ERL = ERLString(p_sERLString)
elif p_sERL is not None:
self.ERL = p_sERL
elif self.LearnConfig is not None:
self.ERL = self.LearnConfig.ERL
assert self.ERL is not None, "No ERL given"
assert self.ERL.IsValid, "Invalid ERL %s" % self.ERL.String
self.Open(self.ERL.FoldNumber, self.ERL.ExperimentUID)
#------------------------------------------------------------------------------------
def Open(self, p_nFoldNumber=None, p_UID=None, p_bIsVerbose=True):
if (p_nFoldNumber is None) and (self.LearnConfig is not None):
p_nFoldNumber = self.LearnConfig.FoldNumber
assert p_nFoldNumber is not None
self.__setFoldNumber(p_nFoldNumber)
# if self.RunSub.LastUID == ExperimentSubFolder.NO_SUBFOLDERS:
# # If no experiment has been run for the fold number it starts the first experiment
# self.Begin(p_nFoldNumber)
# else:
# Initializes with the given UID or the UID of the last experiment
self.IsNew = False
if p_UID is None:
self.MinuteUID.UID = self.RunSub.LastUID
else:
self.MinuteUID.UID = p_UID
self.StoreSub.Initialize(self.MinuteUID.UID, p_bIsEnsuringPaths=False)
self.RunSub.Initialize(self.MinuteUID.UID, p_bIsEnsuringPaths=False)
if p_bIsVerbose:
if type(self).__verboseLevel >= 1:
print("[TALOS] Loaded experiment [%s], stored in %s, started at %s" %
( self.MinuteUID.UID, self.RunSub.ExperimentFolder
, self.MinuteUID.DateTime.strftime(tcc.LOG_DATETIME_FORMAT) )
)
self.Log.Open("", p_nCustomLogFileName=self.RunSub.LogFileName, p_nLogType=Logger.LOG_TYPE_CONFIG)
self.Code = "%d" % self.FoldNumber + "/" + self.MinuteUID.UID
if self.ERL is None:
self.ERL = ERLString(self.GetERLString())
#------------------------------------------------------------------------------------
def Begin(self, p_nFoldNumber=None):
if (p_nFoldNumber is None) and (self.LearnConfig is not None):
p_nFoldNumber = self.LearnConfig.FoldNumber
assert p_nFoldNumber is not None
self.__setFoldNumber(p_nFoldNumber)
# Sets seeds for reproducibility
random.seed(self.RandomSeed)
np.random.seed(self.RandomSeed)
# Initializes a new UID for the current minute and ensures the subfolders
if not self.IsNew:
self.MinuteUID = MinuteUID()
self.StoreSub.Initialize(self.MinuteUID.UID, p_bIsEnsuringPaths=False)
self.RunSub.Initialize(self.MinuteUID.UID)
self.Code = "%d" % self.FoldNumber + "/" + self.MinuteUID.UID
if type(self).__verboseLevel >= 2:
print("Begin experiment at", self.MinuteUID.DateTime)
self.Log.Open("", p_nCustomLogFileName=self.RunSub.LogFileName, p_nLogType=Logger.LOG_TYPE_CONFIG)
self.Log.WriteLine("Initializing experiment [%s] ..." % self.Code)
self.Log.Flush()
#------------------------------------------------------------------------------------
def StoreCompressedModels(self):
sZipFiles = self.RunSub.ListCompressedModels()
sDestFolder = self.StoreSub.ExperimentModelFolder
Storage.EnsurePathExists(sDestFolder)
for sZipFile in sZipFiles:
self.Log.Print("Moving model %s to storage folder %s" % (sZipFile, sDestFolder))
Storage.MoveFileToFolder(sZipFile, sDestFolder)
Storage.DeleteEmptyFolder(self.RunSub.ExperimentModelFolder)
#------------------------------------------------------------------------------------
def WriteGraph(self, p_oMainGraph):
NetworkGraphWriter(self.RunSub.ExperimentConfigFolder, p_oMainGraph).Write()
#------------------------------------------------------------------------------------
def End(self):
self.Log.Flush()
#------------------------------------------------------------------------------------
#==================================================================================================
#==================================================================================================
class NetworkGraphWriter(object):
#------------------------------------------------------------------------------------
def __init__(self, p_sTensorboardLogFolder, p_oGraph):
#........ | Instance Attributes | ..............................................
self.TensorboardLogFolder = p_sTensorboardLogFolder
self.Graph = p_oGraph
#................................................................................
#------------------------------------------------------------------------------------
def Write(self):
oSummaryWriter = tf.summary.FileWriter(logdir=self.TensorboardLogFolder, graph=self.Graph, filename_suffix=".tf")
oSummaryWriter.flush()
oGraphDef = self.Graph.as_graph_def()
sGraphDefStr = str(oGraphDef)
with open(os.path.join(self.TensorboardLogFolder,"tensorflow-graph.txt"), "w") as oFile:
oFile.write(sGraphDefStr)
self.__writeBatchFile(self.TensorboardLogFolder)
#------------------------------------------------------------------------------------
def __writeBatchFile(self, p_sFolder):
sWinScriptFileName = os.path.join(p_sFolder,"browse-graph.bat")
sLinuxScriptFileName = os.path.join(p_sFolder,"bgr.sh")
for nIndex, sFile in enumerate( [sWinScriptFileName, sLinuxScriptFileName] ):
with open(sFile, "w") as oFile:
if nIndex == 0:
print("call %s %s" % (SysParams.AnacondaActivation, SysParams.AnacondaEnvironment), file=oFile)
else:
print("#! /bin/bash", file=oFile)
print("source activate %s" % SysParams.AnacondaEnvironment, file=oFile)
print("cd " + self.TensorboardLogFolder, file=oFile)
print("tensorboard --logdir .", file=oFile)
#------------------------------------------------------------------------------------
#==================================================================================================
#==================================================================================================
class ClassificationEvaluator(object):
#------------------------------------------------------------------------------------
def __init__(self, p_oNetwork, p_bIsDiscardingModels=True):
#........ | Instance Attributes | ..............................................
self.Network = p_oNetwork
self.ExperimentSub = self.Network.Experiment.RunSub
self.Models=None
self.ModelIndex=None
self.CurrentModelEpochNumber = None
self.CurrentModelFolder = None
self.HasLoadedModel = None
self.IsDiscardingModels = p_bIsDiscardingModels
self.IsAutoDeterminingBestModels = False
#................................................................................
#------------------------------------------------------------------------------------
def FirstModel(self):
self.Models = self.ExperimentSub.ListSavedModels()
        # If the models folder has been deleted, fall back to the saved results
if self.Models == []:
self.Models = self.ExperimentSub.ListSavedResults()
self.ModelIndex = -1
return self.NextModel()
#------------------------------------------------------------------------------------
def NextModel(self):
self.CurrentModelEpochNumber = None
self.CurrentModelFolder = None
self.ModelIndex += 1
if self.ModelIndex < len(self.Models):
oRec = self.Models[self.ModelIndex]
self.CurrentModelEpochNumber = oRec[0]
self.CurrentModelFolder = oRec[1]
if not self.IsExistingModelResults():
self.Network.Log.Print("Evaluating folder %s" % self.CurrentModelFolder)
self.Network.State.LoadWeights(self.CurrentModelEpochNumber)
self.HasLoadedModel = True
else:
self.Network.Log.Print("Results found for epoch %d" % self.CurrentModelEpochNumber)
self.HasLoadedModel = False
return self.CurrentModelEpochNumber
#------------------------------------------------------------------------------------
def EndOfModels(self):
return self.ModelIndex >= len(self.Models)
#------------------------------------------------------------------------------------
def IsExistingModelResults(self):
#print(self.ExperimentSub.ModelResultsFileNameTemplate % self.CurrentModelEpochNumber)
return Storage.IsExistingFile(self.ExperimentSub.ModelResultsFileNameTemplate % self.CurrentModelEpochNumber)
#------------------------------------------------------------------------------------
def CalculateMetrics(self, p_oPrediction):
oMetrics = ClassificationMetrics()
oMetrics.Calculate(p_oPrediction.Actual, p_oPrediction.Predicted, p_oPrediction.PredictedProbs)
oMetrics.CalculateTopK(p_oPrediction.TopKappa, p_oPrediction.TopKCorrect)
oMetrics.IDs = p_oPrediction.SampleIDs
oMetrics.Save( self.ExperimentSub.ModelResultsFileNameTemplate % self.CurrentModelEpochNumber )
#------------------------------------------------------------------------------------
def DetermineBestModels(self, p_oExperiment=None):
oBest = ClassificationBest(self.ExperimentSub.ExperimentResultsFolder)
oBest.Run()
oBest.Save(self.ExperimentSub.BestModelResultsFileName)
oBest.ExportToText(self.ExperimentSub.BestModelTextFileName, p_oExperiment)
# Discards all models except the best ones
if self.IsDiscardingModels:
self.ExperimentSub.DiscardModels(oBest.DiscardedEpochs)
        # Compresses each model's parameters folder into a zip file
self.ExperimentSub.CompressModels(oBest.BestEpochs)
# Moves the model zip files to the neural network experiment storage
self.Network.Experiment.StoreCompressedModels()
#------------------------------------------------------------------------------------
def Evaluate(self, p_oIterator):
bMustStart = False
self.FirstModel()
while not self.EndOfModels():
if self.HasLoadedModel:
bMustStart = True
break
self.NextModel()
if bMustStart:
p_oIterator.Start()
self.FirstModel()
while not self.EndOfModels():
if self.HasLoadedModel:
                # Runs the evaluation data through the trained model
oPrediction = self.Network.PredictEval(p_oIterator)
self.CalculateMetrics(oPrediction)
p_oIterator.Resume()
self.NextModel()
p_oIterator.Stop(True)
if self.IsAutoDeterminingBestModels:
self.DetermineBestModels()
#------------------------------------------------------------------------------------
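# Evaluation flow sketch (the network and iterator objects are hypothetical;
# the methods called are the ones defined in this module):
#
#   oExperiment = ExperimentFolder.GetExperiment("<run folder>")
#   oEvaluator = ClassificationEvaluator(oNetwork, p_bIsDiscardingModels=True)
#   oEvaluator.Evaluate(oValidationIterator)
#   oEvaluator.DetermineBestModels(oExperiment)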
#==================================================================================================
|
"""Convolutional Layers for NeuralPy"""
from .conv1d import Conv1D
from .conv2d import Conv2D
from .conv3d import Conv3D
|
import dash
from dash.dependencies import Input, Output
from .. import id_constants, utils
from ..dash_app import app
@app.callback(
Output(id_constants.SIGNAL_SLI_REFRESH, "children"),
[
Input(id_constants.REFRESH_SLI_BUTTON, "n_clicks_timestamp"),
Input(id_constants.REFRESH_SLI_INTERVAL, "n_intervals"),
],
prevent_initial_call=True,
)
def update_sli_refresh_signal(n_clicks_timestamp: int, n_intervals: int):
"""Updates the SLI refresh signal.
Args:
n_clicks_timestamp: the timestamp when the SLI refresh button was clicked
n_intervals: the amount of times the interval component was updated
Returns:
a constant to be placed into the SLI refresh signal
"""
ctx = dash.callback_context
triggered_id, triggered_prop, triggered_value = utils.ctx_triggered_info(ctx)
return triggered_id
|
'''
A pronic number is a number which is the product of two consecutive integers,
such as 2 = 2*1 (2 and 1 are consecutive integers) or 12 = 4*3.
This program prints the pronic numbers in the given range. '''
import math
'''Function to check whether a number is pronic or not.
A number n is pronic if the positive root of i^2 + i - n = 0 is a non-negative integer,
i.e. the discriminant 1 + 4*n is an odd perfect square.'''
def is_pronic(n):
dis = 1 + 4 * n
if dis <= 0:
return 0
else:
root = int(math.sqrt(dis))
if root * root == dis and dis % 2 == 1:
return 1
else:
return 0
if __name__ == '__main__':
    ran_ge = list(input("Enter range to print all the PRONIC NUMBERS: ").split())
    # Printing pronic numbers in the given range
    print("PRONIC NUMBERS from " + ran_ge[0] + " to " + ran_ge[1] + " are:")
    for i in range(int(ran_ge[0]), int(ran_ge[1]) + 1):
        if is_pronic(i):
            print(i, end=" ")
'''
Sample Input/Output:
Input:
Enter range to print all the PRONIC NUMBERS: 1 1000
Output:
PRONIC NUMBERS from 1 to 1000 are:
2 6 12 20 30 42 56 72 90 110 132 156 182 210 240 272 306 342 380 420 462 506 552 600 650 702 756 812 870 930 992
Time Complexity:O(n) where n is total numbers in range
Time Complexity of is_pronic()=O(1)
Space Complexity:O(1)
'''
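
# Equivalent check, shown as a sketch (assumes Python 3.8+ for math.isqrt and
# non-negative n): n is pronic exactly when i*(i+1) == n for i = floor(sqrt(n)),
# which avoids the discriminant test used in is_pronic() above.
def is_pronic_alt(n):
    i = math.isqrt(n)
    return i * (i + 1) == n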
|
# Copyright (c) 2013, ReMake Electric ehf
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This is a module for a single message consumption process
It listens to a topic and expects to see a known sequence of messages.
"""
from __future__ import division
import collections
import errno
import logging
import math
import os
import stat
import tempfile
import time
import fuse
import paho.mqtt.client as mqtt
from beem.trackers import ObservedMessage as MsgStatus
class TrackingListener():
"""
An MQTT message subscriber that tracks an expected message sequence
and generates timing, duplicate/missing and monitors for drops
"""
msg_statuses = []
def __init__(self, host, port, opts):
self.options = opts
self.cid = opts.clientid
self.log = logging.getLogger(__name__ + ":" + self.cid)
self.mqttc = mqtt.Client(self.cid)
self.mqttc.on_message = self.msg_handler
self.mqttc.on_subscribe = self.subscribe_handler
#self.mqttc.on_log = self.log_handler
self.mqttc.on_disconnect = self.disconnect_handler
self.mqttc.on_connect = self.connect_handler
self.listen_topic = opts.topic
self.time_start = None
self.connected = False
# TODO - you _probably_ want to tweak this
if hasattr(self.mqttc, "max_inflight_messages_set"):
self.mqttc.max_inflight_messages_set(1000)
if opts.ca_certs or opts.certfile or opts.keyfile:
self.log.info("TLS %s %s %s", opts.ca_certs, opts.certfile, opts.keyfile)
self.mqttc.tls_set(opts.ca_certs, opts.certfile, opts.keyfile)
self.log.info("Host %s Port %s", host, port)
rc = self.mqttc.connect(host, port, 60)
if rc:
raise Exception("Couldn't even connect! ouch! rc=%d" % rc)
self.log.info("Connection established")
self.drop_count = None
self.dropping = False
self.mqttc.loop_start()
time.sleep(4)
def subscribe_handler(self, client, userdata, mid, granted_qos):
self.log.info("subscribe handler: %d", mid)
def log_handler(self, client, userdata, level, buf):
self.log.info("%d: %s", level, buf)
def connect_handler(self, client, userdata, flags, rc):
self.connected = True
def disconnect_handler(self, client, userdata, rc):
self.log.info("disconnect handler: %d", rc)
def msg_handler(self, mosq, userdata, msg):
# WARNING: this _must_ release as quickly as possible!
# get the sequence id from the topic
#self.log.debug("heard a message on topic: %s", msg.topic)
#if msg.topic == '$SYS/broker/publish/messages/dropped':
# if self.drop_count:
# self.log.warn("Drop count has increased by %d",
# int(msg.payload) - self.drop_count)
# self.dropping = True
# else:
# self.drop_count = int(msg.payload)
# self.log.debug("Initial drop count: %d", self.drop_count)
# return
if not self.time_start:
self.time_start = time.time()
try:
ms = MsgStatus(msg)
self.msg_statuses.append(ms)
except Exception:
self.log.exception("Failed to parse a received message. (Is the publisher sending time-tracking information with -t?)")
def run(self, qos=1):
"""
Start a (long lived) process waiting for messages to arrive.
The number of clients and messages per client that are expected
are set at creation time
"""
while not self.connected:
time.sleep(1)
self.expected = self.options.msg_count * self.options.client_count
self.log.info(
"Listening for %d messages on topic %s (q%d)",
self.expected, self.listen_topic, qos)
rc, mid = self.mqttc.subscribe(self.listen_topic, qos)
self.log.info("Subscribe returned %d %d", rc, mid)
if rc:
raise Exception("Couldn't even subscribe! ouch! rc=%d" % rc)
while len(self.msg_statuses) < self.expected:
# let the mosquitto thread fill us up
time.sleep(1)
self.log.info("Still waiting for %d messages",
self.expected - len(self.msg_statuses))
if self.dropping:
self.log.error("Detected drops are occuring, aborting test!")
break
self.time_end = time.time()
self.mqttc.disconnect()
def stats(self):
msg_count = len(self.msg_statuses)
flight_times = [x.time_flight() for x in self.msg_statuses]
mean = sum(flight_times) / len(flight_times)
squares = [x * x for x in [q - mean for q in flight_times]]
stddev = math.sqrt(sum(squares) / len(flight_times))
actual_clients = set([x.cid for x in self.msg_statuses])
per_client_expected = range(1, self.options.msg_count + 1)
per_client_stats = {}
for cid in actual_clients:
per_client_real = [x.mid for x in self.msg_statuses if x.cid == cid]
per_client_duplicates = [x for x, y in collections.Counter(per_client_real).items() if y > 1]
per_client_real_mid = [x.mid for x in self.msg_statuses if x.cid == cid]
per_client_missing_mid = list(set(per_client_expected).difference(set(per_client_real_mid)))
per_client_stats[cid] = {
"actual_count": len(per_client_real),
"expected_count": len(per_client_expected),
"duplicate_count": len(per_client_duplicates),
"missing_count": len(per_client_missing_mid)
}
return {
"clientid": self.cid,
"client_count": len(actual_clients),
"test_complete": not self.dropping,
"msg_count": msg_count,
"per_client": per_client_stats,
"ms_per_msg": (self.time_end - self.time_start) / msg_count * 1000,
"msg_per_sec": msg_count / (self.time_end - self.time_start),
"time_total": self.time_end - self.time_start,
"flight_time_mean": mean,
"flight_time_stddev": stddev,
"flight_time_max": max(flight_times),
"flight_time_min": min(flight_times)
}
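
# Usage sketch (`opts` is a hypothetical options namespace; the attributes it
# needs are the ones read in __init__/run above: clientid, topic, msg_count,
# client_count and the optional TLS file paths):
#
#   listener = TrackingListener("localhost", 1883, opts)
#   listener.run(qos=1)
#   print(listener.stats()["msg_per_sec"])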
def static_file_attrs(content=None):
now = time.time()
if content:
size = len(content)
else:
size = 20
return {
"file": dict(st_mode=(stat.S_IFREG | 0o0444), st_nlink=1,
st_size=size,
st_ctime=now, st_mtime=now,
st_atime=now),
"content": content
}
class MalariaWatcherStatsFS(fuse.LoggingMixIn, fuse.Operations):
file_attrs = dict(st_mode=(stat.S_IFREG | 0o0444), st_nlink=1,
st_size=20000,
st_ctime=time.time(), st_mtime=time.time(),
st_atime=time.time())
dir_attrs = dict(st_mode=(stat.S_IFDIR | 0o0755), st_nlink=2,
st_ctime=time.time(), st_mtime=time.time(),
st_atime=time.time())
README_STATFS = """
This is a FUSE filesystem that contains a set of files representing various
statistics we have gathered about the MQTT broker we are watching and the
topics we are subscribed to.
"""
msgs_total = 0
msgs_stored = 0
drop_count = 0
drop_count_initial = None
def handle_msgs_total(self):
"""Total number of messages seen since we started"""
return self.msgs_total
def handle_msgs_stored(self):
"""Total number of stored ($sys/broker/messages/stored)"""
return self.msgs_stored
def handle_uptime(self):
"""Time in seconds this watcher has been running"""
return time.time() - self.time_start
def handle_drop_count(self):
"""Total drops since this watcher has been running"""
return self.drop_count
def handle_topic(self):
"""The topics this watcher is subscribing too"""
return '\n'.join(self.listen_topics)
def handle_readme(self):
"""Returns 'this' readme ;)"""
rval = self.README_STATFS
useful = [x for x in self.handlers if x != '/']
file_field = "File "
rval += "\n" + file_field + "Description\n\n"
for h in useful:
func = self.handlers[h].get("handler", None)
desc = None
if not func:
desc = "Raw file, no further description"
if func:
desc = func.__doc__
if not desc:
desc = "No description in handler method! (Fix pydoc!)"
# pad file line to line up with the description
line = "%s%s\n" % (str.ljust(h[1:], len(file_field)), desc)
rval += line
return rval
handlers = {
"/": {"file": dir_attrs, "handler": None},
"/msgs_total": {"file": file_attrs, "handler": handle_msgs_total},
"/msgs_stored": {"file": file_attrs, "handler": handle_msgs_stored},
"/uptime": {"file": file_attrs, "handler": handle_uptime},
"/topic": {"file": file_attrs, "handler": handle_topic},
"/drop_count": {"file": file_attrs, "handler": handle_drop_count},
"/README": static_file_attrs(README_STATFS),
"/README.detailed": {"file": file_attrs, "handler": handle_readme}
}
def __init__(self, options):
print("listener operations __init__")
self.options = options
self.time_start = time.time()
def msg_handler(self, mosq, userdata, msg):
# WARNING: this _must_ release as quickly as possible!
# get the sequence id from the topic
#self.log.debug("heard a message on topic: %s", msg.topic)
if "/messages/dropped" in msg.topic:
            if self.drop_count_initial is not None:
self.log.warn("Drop count has increased by %d",
(int(msg.payload) - self.drop_count_initial))
self.drop_count = int(msg.payload) - self.drop_count_initial
else:
self.drop_count_initial = int(msg.payload)
self.log.debug("Initial drops: %d", self.drop_count_initial)
return
if "messages/stored" in msg.topic:
self.msgs_stored = int(msg.payload)
return
self.msgs_total += 1
def init(self, path):
"""
Fuse calls this when it's ready, so we can start our actual mqtt
processes here.
"""
print("listener post init init(), path=", path)
self.cid = self.options.clientid
self.log = logging.getLogger(__name__ + ":" + self.cid)
self.mqttc = mqtt.Client(self.cid)
self.mqttc.on_message = self.msg_handler
self.listen_topics = self.options.topic
# TODO - you _probably_ want to tweak this
self.mqttc.max_inflight_messages_set(200)
rc = self.mqttc.connect(self.options.host, self.options.port, 60)
if rc:
raise Exception("Couldn't even connect! ouch! rc=%d" % rc)
# umm, how?
# b/p/m for >= 1.2, b/m for 1.1.x
#self.mqttc.subscribe('$SYS/broker/publish/messages/dropped', 0)
#self.mqttc.subscribe('$SYS/broker/messages/dropped', 0)
#self.mqttc.subscribe('$SYS/broker/messages/stored', 0)
self.mqttc.loop_start()
[self.mqttc.subscribe(t, self.options.qos) for t in self.listen_topics]
def getattr(self, path, fh=None):
if path not in self.handlers:
raise fuse.FuseOSError(errno.ENOENT)
return self.handlers[path]["file"]
def read(self, path, size, offset, fh):
if self.handlers[path].get("content", False):
return self.handlers[path]["content"]
funcy = self.handlers[path]["handler"]
return str(funcy(self)) + "\n"
def readdir(self, path, fh):
return ['.', '..'] + [x[1:] for x in self.handlers if x != '/']
class CensusListener():
"""
Create a listener that just watches all the messages go past.
It doesn't care about time in flight or expected vs actual, it just cares
about what it has seen, and maintains long term stats on whatever
it does see.
"""
def __init__(self, options):
self.log = logging.getLogger(__name__)
path_provided = True
if not options.directory:
path_provided = False
options.directory = tempfile.mkdtemp()
self.log.info("Statistics files will be available in %s", options.directory)
fuse.FUSE(MalariaWatcherStatsFS(options),
options.directory, foreground=True)
if not path_provided:
self.log.info("Automatically removing statsfs: %s", options.directory)
os.rmdir(options.directory)
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tabular Q-Learner self-play example.
Two Q-Learning agents are trained by playing against each other.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from absl import app
from absl import flags
import numpy as np
from six.moves import range
from open_spiel.python import rl_environment
from open_spiel.python import rl_tools
from open_spiel.python.algorithms import tabular_qlearner
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_train_episodes", int(1e6),
"Number of training episodes.")
flags.DEFINE_integer("num_eval_episodes", int(1e4),
"Number of episodes to use during each evaluation.")
flags.DEFINE_integer("eval_freq", int(1e4),
"The frequency (in episodes) to run evaluation.")
flags.DEFINE_string(
"epsilon_schedule", None,
"Epsilon schedule: e.g. 'linear,init,final,num_steps' or "
"'constant,0.2'")
flags.DEFINE_string("game", "tic_tac_toe", "Game to load.")
def eval_agents(env, agents, num_episodes):
"""Evaluate the agents, returning a numpy array of average returns."""
rewards = np.array([0] * env.num_players, dtype=np.float64)
for _ in range(num_episodes):
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step, is_evaluation=True)
time_step = env.step([agent_output.action])
for i in range(env.num_players):
rewards[i] += time_step.rewards[i]
rewards /= num_episodes
return rewards
def create_epsilon_schedule(sched_str):
"""Creates an epsilon schedule from the string as desribed in the flags."""
values = FLAGS.epsilon_schedule.split(",")
if values[0] == "linear":
assert len(values) == 4
return rl_tools.LinearSchedule(
float(values[1]), float(values[2]), int(values[3]))
elif values[0] == "constant":
assert len(values) == 2
return rl_tools.ConstantSchedule(float(values[1]))
else:
print("Unrecognized schedule string: {}".format(sched_str))
sys.exit()
def main(_):
env = rl_environment.Environment(FLAGS.game)
num_players = env.num_players
num_actions = env.action_spec()["num_actions"]
agents = []
if FLAGS.epsilon_schedule is not None:
for idx in range(num_players):
agents.append(
tabular_qlearner.QLearner(
player_id=idx,
num_actions=num_actions,
epsilon_schedule=create_epsilon_schedule(FLAGS.epsilon_schedule)))
else:
agents = [
tabular_qlearner.QLearner(player_id=idx, num_actions=num_actions)
for idx in range(num_players)
]
# 1. Train the agents
training_episodes = FLAGS.num_train_episodes
for cur_episode in range(training_episodes):
if cur_episode % int(FLAGS.eval_freq) == 0:
avg_rewards = eval_agents(env, agents, FLAGS.num_eval_episodes)
print("Training episodes: {}, Avg rewards: {}".format(
cur_episode, avg_rewards))
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
time_step = env.step([agent_output.action])
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
app.run(main)
|
from pydub import AudioSegment
from pydub.effects import normalize
import os
import numpy as np
from os.path import join, splitext
from random import randint, shuffle
import h5py as h5
fragment_length = 10
max_dataset_size = 65536
fs = 44100
bd = 16
fmt = [None, "mp3", "aac", "ogg", "wma", "ac3"]
ffmpeg_format = [None, "mp3", "adts", "ogg", "asf", "ac3"]
ffmpeg_codec = [None, "libmp3lame", "aac", "libvorbis", "wmav2", "ac3"]
bitrates = ["320k", "192k", "128k"]
bitrates_labels = [1, 2, 3]
fmt_labels = [0, 10, 20, 30, 40, 50]
lenbr = len(bitrates)*len(fmt) - len(bitrates) + 1
allfiles = []
for (root, dirs, files) in os.walk(os.getcwd()):
for cfile in files:
if splitext(cfile)[-1].lower() == ".flac":
allfiles.append(join(root, cfile))
maxsize = len(allfiles)*lenbr
shuffle(allfiles)
actualsize = 0
with h5.File('dataset.h5', 'w') as f:
labels = f.create_dataset("labels", (maxsize,), maxshape=(maxsize,),
dtype="int8")
data = f.create_dataset("data", (maxsize, fragment_length*fs),
maxshape=(maxsize, fragment_length*fs),
dtype=f"int{bd}")
for fpath in allfiles:
try:
audio = AudioSegment\
.from_file(fpath, format="flac")
if audio.duration_seconds > fragment_length and audio.frame_rate == fs:
print(f"Preparing: {fpath} ({int(actualsize/lenbr)}/{int(maxsize/lenbr)})")
audio = normalize(audio.set_sample_width(bd//8))
start = randint(0, np.floor((audio.duration_seconds-fragment_length)*1000))
fragment = audio[start:(fragment_length*1000+start)]
for i1 in range(len(fmt)):
if fmt[i1] is None:
labels[actualsize] = fmt_labels[i1]
data[actualsize, :] = fragment.set_channels(1).get_array_of_samples()
actualsize += 1
else:
for i2 in range(len(bitrates)):
labels[actualsize] = bitrates_labels[i2] + fmt_labels[i1]
fragment.export(f"/tmp/tmp.{fmt[i1]}",
format=ffmpeg_format[i1],
codec=ffmpeg_codec[i1],
bitrate=bitrates[i2])
fragment = AudioSegment.from_file(f"/tmp/tmp.{fmt[i1]}")
data[actualsize, :] = fragment.set_channels(1)\
.set_sample_width(bd//8)\
.get_array_of_samples()[0:fragment_length*fs]
actualsize += 1
if actualsize >= max_dataset_size:
break
except KeyboardInterrupt:
print("Received SIGINT, exiting...")
break
except Exception as ex:
print(f"Exception: {ex}")
continue
labels.resize(actualsize, 0)
data.resize(actualsize, 0)
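# Read-back sketch (not part of the original script): reopen the HDF5 file written
# above and print the stored shapes, assuming the "labels" and "data" dataset names
# used during creation.
with h5.File('dataset.h5', 'r') as f:
    labels_out = f["labels"]
    data_out = f["data"]
    print(f"labels: {labels_out.shape}, data: {data_out.shape} "
          f"({data_out.shape[1] / fs} s per fragment at {fs} Hz)")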
|
'''
Binary Search
Step 1: Initialize 2 pointers: start at the beginning of the array, end at the end of the array.
Step 2: Find the element in the middle between the 2 pointers.
Step 3: If the element in the middle is bigger than our target, set the end pointer to middle - 1.
Step 4: If the element in the middle is smaller than our target, set the start pointer to middle + 1.
e.g. array: [1, 2, 3, 4, 5, 6]
target = 6
'''
def binarySearch(arr, target):
    left = 0
    right = len(arr) - 1
    while left <= right:
        mid = (left + right) // 2
        if arr[mid] == target:
            return mid
        elif arr[mid] > target:
            right = mid - 1  # move past mid so the search range always shrinks
        else:
            left = mid + 1
    return -1
array = [1, 2, 3, 4, 5, 6]
result = binarySearch(array, 5)
if result != -1:
print("Element is present at the index %d" %result)
else:
print("Element is not present in the array.") |
# Generated by Django 2.2.9 on 2020-02-06 22:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('agents', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='agent',
options={'ordering': ['name']},
),
]
|
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
import pickle
import tensorflow as tf
import vae_
from sklearn.utils import shuffle
import collections
STEM_SAMPLE_POINT_NUM = 20
data_size = 40*3
batch_size = 50
# settings for VAE.
n_hidden = 100 # num of hidden units in mlp.
dim_z = 20 # dim of latent.
br_data_size = 4 * dim_z + 3
# data_path = "C:/Users/Administrator/Desktop/neuron_factor_tree_test/test/data/L4_BC/data"
#
# sess_save_dir = "C:/Users/Administrator/Desktop/neuron_factor_tree_test/test/data/L4_BC/data/save"
# save_dir = "C:/Users/Administrator/Desktop/neuron_factor_tree_test/test/data/L4_BC/data/save"
'''
Load both branch codes and branch id sequences.
'''
class SeqLoader():
def __init__(self, test_path, seq_size, branch_size, batch_size, seq_type):
self.data_path = os.path.join(test_path, "seq_data")
self.save_path = os.path.join(test_path, "vae_save")
self.seq_size = seq_size
self.branch_size = branch_size
self.batch_size = batch_size
self.type = seq_type
# load rnn data.
training_data = os.path.join(self.data_path, "factorDB"+"_"+str(self.type)+".txt")
if not (os.path.exists(training_data)):
self.build_seq_data()
else:
self.load_processed(self.data_path)
# self.build_seq_data()
self.create_batches()
self.reset_batch()
# Collect all the branch codes(normalized).
def branches_loader(self, branch_codes_file):
'''
A Branch : [angle, stm_scale_1, stm_scale_2, br_codes[sample_point_num, 3]].
'''
branches = []
branch = []
branch_codes = []
br_angle = 0.0
stm_scale = 1.0
with open(branch_codes_file) as f:
for line in f.readlines():
if line.startswith("#BRANCH:"):
branch = []
branch_codes = []
elif line.startswith("#ANGLE:"):
br_angle = float(line[:-1].split(" ")[1])
branch.append(br_angle)
elif line.startswith("#STEM:"):
stm_scale = float(line[:-1].split(" ")[1])
branch.append(stm_scale)
elif line.startswith("#DATA:"):
code_str = np.array(line[:-1].split(" ")[1:4])
code = list(map(float, code_str))
branch_codes.append(code)
elif line.startswith("#BRANCHEND"):
branch.append(branch_codes)
# print(branch)
branches.append(branch)
# print(len(branches))
return branches
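    # Illustrative .bcd record layout, reconstructed from the parser above (the numbers
    # are made-up placeholders); a branch typically carries one "#ANGLE:" line, two
    # "#STEM:" scale lines and one 3-float "#DATA:" line per sample point:
    #   #BRANCH:
    #   #ANGLE: 0.52
    #   #STEM: 1.0
    #   #STEM: 0.8
    #   #DATA: 0.1 0.2 0.3
    #   ...
    #   #BRANCHEND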
    # Load sequences as branch ids; only load one type each time.
def sequences_loader(self, branch_database_path):
seqs = []
seq = []
current_type = 0
with open(branch_database_path) as f:
for line in f.readlines():
if line.startswith("#SEQ"):
if current_type==self.type and len(seq) != 0:
seqs.append(seq)
seq = []
current_type = int(np.array(line[:-1].split(" ")[1])) # update current seq type
elif line.startswith("#BID"):
b_id_str = np.array(line[:-1].split(" ")[1])
b_id = float(b_id_str)
seq.append(b_id)
if current_type==self.type and len(seq)!=0:
seqs.append(seq)
# print(len(seqs))
return seqs
def process_branches(self, branches):
print("Processing branches.")
        # load scaler_file created in the branch_generator training step.
scaler_file = os.path.join(self.data_path, "scaler.txt")
with open(scaler_file, "rb") as f:
min_max_scaler = pickle.load(f)
print("Scaler file loaded from [%s]." % scaler_file)
self.min_max_scaler = min_max_scaler # record the scaler.
br_count = np.array([[br[0], br[1], br[2]] for br in branches])
# print(br_count.shape)
angle_stm_scaler = MinMaxScaler()
angle_stm_scaler.fit(br_count)
angle_stm_scaler_file = os.path.join(self.data_path, "angle_stm_scaler.txt")
with open(angle_stm_scaler_file, "wb") as f:
pickle.dump(angle_stm_scaler, f)
print("Angle stm scaler file saved at [%s]." % angle_stm_scaler_file)
        # Normalization
data = [[angle_stm_scaler.transform(np.array([br[0], br[1], br[2]]).reshape(-1, 3)),
min_max_scaler.transform(br[3]).reshape(STEM_SAMPLE_POINT_NUM*2, 3, 1)]
for br in branches]
# print(len(data))
# print(data[0])
return data
def build_seq_data(self):
print("Building sequence datas.")
branch_seq_path = os.path.join(self.data_path, "branch_dataset.brd")
branch_code_path = os.path.join(self.data_path, "branch_code.bcd")
# Load branches.
branches = self.branches_loader(branch_code_path)
print("{} branches loaded.".format(len(branches)))
self.branches = self.process_branches(branches) # [None, [angle, scale_1, scale_2, codes]].
# Load branch id sequences.
self.sequences = self.sequences_loader(branch_seq_path)
print("{} sequences loaded.".format(len(self.sequences)))
if int(len(self.sequences) / self.batch_size) == 0:
self.tensor = []
return
# print(self.sequences)
# Sequence normalization.
seq_ids = []
for seq in self.sequences:
seq.reverse()
seq_n = []
for id in [int(f) for f in seq]:
if id!=-1: # empty branches.
seq_n.append(id)
if(len(seq_n)==0):
continue
elif len(seq_n) < self.seq_size:
for i in range(self.seq_size - len(seq_n)):
seq_n.append(-1)
            else:  # equal to or longer than seq_size.
seq_n = seq_n[(len(seq_n) - self.seq_size) + 1:]
seq_n.append(-1) # make sure last one is -1.
seq_ids.append(seq_n)
# print("shape")
# print(np.array(seq_ids).shape)
# x = tf.placeholder(tf.float32, shape=[None, data_size], name='input_img')
x = tf.placeholder(tf.float32, shape=[None, 20, 3, 1], name='input')
latent = vae_.gaussian_MLP_encoder(x, n_hidden, dim_z, 1.0)
with tf.Session() as sess:
tf.global_variables_initializer().run()
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(self.save_path)
if ckpt:
print("VAE model restored for branch latent transform.")
saver.restore(sess, ckpt.model_checkpoint_path)
else:
return
latent_data = []
for seq in seq_ids:
seq_latents = []
# seq_latents.append(np.array([1.0 for _ in range(br_data_size)])) # all 1.0 for soma.
for br in seq:
if br != -1:
_st_mean, _st_stdv = \
sess.run(latent, feed_dict={x: [self.branches[br][1][:STEM_SAMPLE_POINT_NUM]]})# stem 1
st_latent = np.append(_st_mean[0], _st_stdv[0])
_st_mean, _st_stdv = \
sess.run(latent, feed_dict={x: [self.branches[br][1][STEM_SAMPLE_POINT_NUM:]]})
st_latent_ = np.append(_st_mean[0], _st_stdv[0])
br_latent = np.append(np.array(self.branches[br][0]), st_latent)
br_latent = np.append(br_latent, st_latent_)
# print(len(br_latent))
# print(br_latent)
seq_latents.append(br_latent)
for i in range(self.seq_size - len(seq_latents)):
seq_latents.append(np.array([0.0 for _ in range(self.branch_size)])) # all 0 for leaf.
latent_data.append(seq_latents)
# print(len(latent_data))
# print(latent_data[0][0])
x_data_ = np.array(latent_data, dtype=np.float32)
print(x_data_.shape) # (data_size, seq_size, 4*dim_z+3)
print("Training data normalized.")
x_data = shuffle(x_data_, random_state=42)
        # Build a neuronFactor database. Each nf in the DB has an id.
factors_ = x_data.reshape(-1, self.branch_size) # reshape to count factors.
factors_s = [str(f) for f in factors_.tolist()]
counter = collections.Counter(factors_s)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
self.factors, _ = zip(*count_pairs)
print(len(self.factors))
# for f in self.factors:
# print(f)
self.factorDB_size = len(self.factors)
self.factorDB = dict(zip(self.factors, range(len(self.factors))))
db_file = os.path.join(self.data_path, "factorDB" + "_" + str(self.type) + ".txt")
print("Got a NeuronFactor Database of size %d." % self.factorDB_size)
tensor = np.array(list(map(self.factorDB.get, factors_s)))
tensor_file = os.path.join(self.data_path, "tensor" + "_" + str(self.type) + "")
self.tensor = tensor.reshape(-1, self.seq_size) # seq_num * seq_size
# save data.
with open(db_file, "wb") as f:
pickle.dump(self.factors, f)
print("NeuronFactorDB saved at [%s]." % db_file)
np.save(tensor_file, self.tensor)
print("Factor tensors saved at [%s]." % tensor_file)
print("Data preprocessed.")
def load_processed(self, data_base_dir):
db_file = os.path.join(data_base_dir, "factorDB_"+str(self.type)+".txt")
tensor_file = os.path.join(data_base_dir, "tensor_"+str(self.type)+".npy")
# Loading data.
with open(db_file, "rb") as f:
self.factors = pickle.load(f)
self.factorDB_size = len(self.factors)
self.factorDB = dict(zip(self.factors, range(len(self.factors))))
self.tensor = np.load(tensor_file)
print("[%d] training data loaded from [%s]." % (len(self.tensor), tensor_file))
def create_batches(self):
self.num_batches = int(len(self.tensor) / self.batch_size)
if self.num_batches== 0:
print("Don't have enough data, finish.")
return
# use the first self.batch_size*self.num_batches data.
tensor = self.tensor[:self.batch_size*self.num_batches]
x_data = tensor
y_data = np.copy(tensor)
for y_ in y_data:
y_[:-1] = y_[1:]
y_[-1] = 0
self.x_batches = np.split(x_data, self.num_batches)
self.y_batches = np.split(y_data, self.num_batches)
def reset_batch(self):
self.index_id = 0
def next_batch(self): # in test use for range.
x, y = self.x_batches[self.index_id], \
self.y_batches[self.index_id]
self.index_id += 1
return x, y
def get_seq_ter_prob(self):
branch_seq_path = os.path.join(self.data_path, "branch_dataset.brd")
seqs = self.sequences_loader(branch_seq_path)
seq_len_list = [len(seq) for seq in seqs]
seq_len_counter = collections.Counter(seq_len_list)
t_prob = {} # A dict for terminate prob.
for l, _ in seq_len_counter.items():
c = sum([c_ for l_, c_ in seq_len_counter.items() if l_ == l])
            r = 0  # count of longer sequences (the rest)
for l_, c_ in seq_len_counter.items():
r_ = 0
if l_ > l: # for longer one
for i in range(l_ - l):
c_ = c_ / 2 # get rid of repetitive sub tree
r_ += c_
r += r_
t_prob[l] = float(c) / float(c + r)
print("Got termination prob.")
return t_prob
# loader = SeqLoader("C:/Users/Administrator/Desktop/TestFullNeuron", 20, 2*STEM_SAMPLE_POINT_NUM, 50, 3)
# x, y = loader.next_batch()
# print(x)
# print(y)
|
"""The Volkswagen We Connect ID integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from weconnect import weconnect
from weconnect.elements.control_operation import ControlOperation
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import DOMAIN
PLATFORMS = [Platform.BINARY_SENSOR, Platform.BUTTON, Platform.SENSOR, Platform.NUMBER]
_LOGGER = logging.getLogger(__name__)
SUPPORTED_VEHICLES = ["ID.3", "ID.4", "ID.5"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Volkswagen We Connect ID from a config entry."""
hass.data.setdefault(DOMAIN, {})
_we_connect = weconnect.WeConnect(
username=entry.data["username"],
password=entry.data["password"],
updateAfterLogin=False,
loginOnInit=False,
)
await hass.async_add_executor_job(_we_connect.login)
await hass.async_add_executor_job(_we_connect.update)
async def async_update_data():
"""Fetch data from Volkswagen API."""
await hass.async_add_executor_job(_we_connect.update)
vehicles = []
for vin, vehicle in _we_connect.vehicles.items():
if vehicle.model.value in SUPPORTED_VEHICLES:
vehicles.append(vehicle)
hass.data[DOMAIN][entry.entry_id + "_vehicles"] = vehicles
return vehicles
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=DOMAIN,
update_method=async_update_data,
update_interval=timedelta(seconds=30),
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id + "_coordinator"] = coordinator
hass.data[DOMAIN][entry.entry_id] = _we_connect
hass.data[DOMAIN][entry.entry_id + "_vehicles"] = []
# Fetch initial data so we have data when entities subscribe
await coordinator.async_config_entry_first_refresh()
# Setup components
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
@callback
async def volkswagen_id_start_stop_charging(call: ServiceCall) -> None:
vin = call.data["vin"]
start_stop = call.data["start_stop"]
if (
await hass.async_add_executor_job(
start_stop_charging,
vin,
_we_connect,
start_stop,
)
is False
):
_LOGGER.error("Cannot send charging request to car")
@callback
async def volkswagen_id_set_climatisation(call: ServiceCall) -> None:
vin = call.data["vin"]
start_stop = call.data["start_stop"]
target_temperature = 0
if "target_temp" in call.data:
target_temperature = call.data["target_temp"]
if (
await hass.async_add_executor_job(
set_climatisation,
vin,
_we_connect,
start_stop,
target_temperature,
)
is False
):
_LOGGER.error("Cannot send climate request to car")
@callback
async def volkswagen_id_set_target_soc(call: ServiceCall) -> None:
vin = call.data["vin"]
target_soc = 0
if "target_soc" in call.data:
target_soc = call.data["target_soc"]
if (
await hass.async_add_executor_job(
set_target_soc,
vin,
_we_connect,
target_soc,
)
is False
):
_LOGGER.error("Cannot send target soc request to car")
@callback
async def volkswagen_id_set_ac_charge_speed(call: ServiceCall) -> None:
vin = call.data["vin"]
if "maximum_reduced" in call.data:
if (
await hass.async_add_executor_job(
set_ac_charging_speed,
vin,
_we_connect,
call.data["maximum_reduced"],
)
is False
):
_LOGGER.error("Cannot send ac speed request to car")
# Register our services with Home Assistant.
hass.services.async_register(
DOMAIN, "volkswagen_id_start_stop_charging", volkswagen_id_start_stop_charging
)
hass.services.async_register(
DOMAIN, "volkswagen_id_set_climatisation", volkswagen_id_set_climatisation
)
hass.services.async_register(
DOMAIN, "volkswagen_id_set_target_soc", volkswagen_id_set_target_soc
)
hass.services.async_register(
DOMAIN, "volkswagen_id_set_ac_charge_speed", volkswagen_id_set_ac_charge_speed
)
return True
def start_stop_charging(
call_data_vin, api: weconnect.WeConnect, operation: str
) -> bool:
"""Start of stop charging of your volkswagen."""
for vin, vehicle in api.vehicles.items():
if vin == call_data_vin:
if operation == "start":
try:
if (
vehicle.controls.chargingControl is not None
and vehicle.controls.chargingControl.enabled
):
vehicle.controls.chargingControl.value = ControlOperation.START
_LOGGER.info("Sended start charging call to the car")
except Exception as exc:
_LOGGER.error("Failed to send request to car - %s", exc)
return False
if operation == "stop":
try:
if (
vehicle.controls.chargingControl is not None
and vehicle.controls.chargingControl.enabled
):
vehicle.controls.chargingControl.value = ControlOperation.STOP
_LOGGER.info("Sended stop charging call to the car")
except Exception as exc:
_LOGGER.error("Failed to send request to car - %s", exc)
return False
return True
def set_ac_charging_speed(
call_data_vin, api: weconnect.WeConnect, charging_speed
) -> bool:
"""Set charging speed in your volkswagen."""
for vin, vehicle in api.vehicles.items():
if vin == call_data_vin:
if (
charging_speed
!= vehicle.domains["charging"][
"chargingSettings"
].maxChargeCurrentAC.value
):
try:
vehicle.domains["charging"][
"chargingSettings"
].maxChargeCurrentAC.value = charging_speed
_LOGGER.info("Sended charging speed call to the car")
except Exception as exc:
_LOGGER.error("Failed to send request to car - %s", exc)
return False
return True
def set_target_soc(call_data_vin, api: weconnect.WeConnect, target_soc: int) -> bool:
"""Set target SOC in your volkswagen."""
target_soc = int(target_soc)
for vin, vehicle in api.vehicles.items():
if vin == call_data_vin:
if (
target_soc > 10
and target_soc
!= vehicle.domains["charging"]["chargingSettings"].targetSOC_pct.value
):
try:
vehicle.domains["charging"][
"chargingSettings"
].targetSOC_pct.value = target_soc
_LOGGER.info("Sended target SoC call to the car")
except Exception as exc:
_LOGGER.error("Failed to send request to car - %s", exc)
return False
return True
def set_climatisation(
call_data_vin, api: weconnect.WeConnect, operation: str, target_temperature: float
) -> bool:
"""Set climate in your volkswagen."""
for vin, vehicle in api.vehicles.items():
if vin == call_data_vin:
if (
target_temperature > 10
and target_temperature
!= vehicle.domains["climatisation"][
"climatisationSettings"
].targetTemperature_C.value
):
try:
vehicle.domains["climatisation"][
"climatisationSettings"
].targetTemperature_C.value = float(target_temperature)
_LOGGER.info("Sended target temperature call to the car")
except Exception as exc:
_LOGGER.error("Failed to send request to car - %s", exc)
return False
if operation == "start":
try:
if (
vehicle.controls.climatizationControl is not None
and vehicle.controls.climatizationControl.enabled
):
vehicle.controls.climatizationControl.value = (
ControlOperation.START
)
_LOGGER.info("Sended start climate call to the car")
except Exception as exc:
_LOGGER.error("Failed to send request to car - %s", exc)
return False
if operation == "stop":
try:
if (
vehicle.controls.climatizationControl is not None
and vehicle.controls.climatizationControl.enabled
):
vehicle.controls.climatizationControl.value = (
ControlOperation.STOP
)
_LOGGER.info("Sended stop climate call to the car")
except Exception as exc:
_LOGGER.error("Failed to send request to car - %s", exc)
return False
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
def get_object_value(value) -> str:
"""Get value from object or enum."""
while hasattr(value, "value"):
value = value.value
return value
class VolkswagenIDBaseEntity(CoordinatorEntity):
"""Common base for VolkswagenID entities."""
# _attr_should_poll = False
_attr_attribution = "Data provided by Volkswagen Connect ID"
def __init__(
self,
we_connect: weconnect.WeConnect,
coordinator: DataUpdateCoordinator,
index: int,
) -> None:
"""Initialize sensor."""
super().__init__(coordinator)
self.we_connect = we_connect
self.index = index
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, f"vw{self.data.vin}")},
manufacturer="Volkswagen",
model=f"{self.data.model}", # format because of the ID.3/ID.4 names.
name=f"Volkswagen {self.data.nickname} ({self.data.vin})",
)
@property
def data(self):
"""Shortcut to access coordinator data for the entity."""
return self.coordinator.data[self.index]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 17 11:33:40 2021
@author: lukepinkel
"""
import numpy as np
import scipy as sp
import scipy.stats
TWOPI = 2.0 * np.pi
LN2PI = np.log(TWOPI)
class SASH:
@staticmethod
def loglike(y, mu, sigma, nu, tau):
return _logpdf(y, mu, sigma, nu, tau)
@staticmethod
def d1loglike(y, mu, sigma, nu, tau):
return _d1logpdf(y, mu, sigma, nu, tau)
@staticmethod
def d2loglike(y, mu, sigma, nu, tau):
return _d2logpdf(y, mu, sigma, nu, tau)
@staticmethod
def d3loglike(y, mu, sigma, nu, tau):
return _d3logpdf(y, mu, sigma, nu, tau)
@staticmethod
def d4loglike(y, mu, sigma, nu, tau):
return _d4logpdf(y, mu, sigma, nu, tau)
def _expand_arrs(*arrs):
y = np.concatenate([np.expand_dims(np.asarray(arr), -1) for arr in arrs], axis=-1)
return y
def _logpdf(x, m=0.0, s=1.0, v=0.0, t=1.0):
z = (x - m) / s
y = np.arcsinh(z) * t - v
r, c, w = np.sinh(y), np.cosh(y), np.sqrt(1.0 + z**2)
logp = np.log(t) + np.log(c) - np.log(s) - np.log(w) - r**2 / 2.0 - LN2PI / 2.0
return logp
def _d1logpdf(x, m, s, v, t):
si1 = 1.0 / s
si2 = si1 / s
r = x - m
r2 = r**2
rs = si2*r2
z = (rs + 1.0)**(-1)
u = np.arcsinh(r*si1)
y = t*u - v
x8 = np.sinh(y)
x9 = np.cosh(y)
x10 = x8*x9
x11 = t/np.sqrt(rs + 1)
x12 = x11*si1
x13 = x8/x9
x14 = si2*r*x11
m = -1/2*si2*z*(2*m - 2*x) + x10*x12 - x12*x13
s = x10*x14 - x13*x14 - si1 + r2*z/s**3
v = x10 - x13
t = -x10*u + x13*u + t**(-1)
D1 = _expand_arrs(m, s, v, t)
return D1
def _d2logpdf(x, m, s, v, t):
x0 = s**(-2)
x1 = m - x
x2 = x0*x1**2
x3 = x2 + 1.0
x4 = x3**(-1)
x5 = t**2
x6 = x2 + 1
x7 = x5/x6
x8 = 2/x3**2
x9 = s**(-1)
x10 = x1*x9
x11 = np.arcsinh(x10)
x12 = t*x11
x13 = v + x12
x14 = np.sinh(x13)
x15 = x14**2
x16 = x15*x7
x17 = np.cosh(x13)
x18 = x17**2
x19 = x18*x7
x20 = x15/x18
x21 = x20*x7
x22 = x14*x17
x23 = t/x6**(3/2)
x24 = x10*x23
x25 = x14/x17
x26 = 2*x10
x27 = x1**3/s**3
x28 = 1/np.sqrt(x6)
x29 = t*x28
x30 = x22*x29
x31 = x25*x29
x32 = x2*x23
x33 = -x15 - x18 - x20 + 1
x34 = x28*x9
x35 = x12*x15
x36 = x12*x18
x37 = x12*x20
x38 = x23*x27
x39 = x0*x1
x40 = x11**2
mm = x0*(-x16 - x19 + x2*x8 - x21 + x22*x24 - x24*x25 - x4 + x7)
ms = x0*(x10*x16 + x10*x19 + x10*x21 - x10*x7 - x22*x32 + x25*x32 + x26*x4 - x27*x8 + x30 - x31)
mv = t*x33*x34
mt = x34*(x12 - x22 + x25 - x35 - x36 - x37)
ss = x0*(-x16*x2 - x19*x2 - x2*x21 - 3*x2*x4 + x2*x7 + x22*x38 - x25*x38 - x26*x30 + x26*x31 + 1 + x1**4*x8/s**4)
sv = x29*x39*(x15 + x18 + x20 - 1)
st = x28*x39*(-x12 + x22 - x25 + x35 + x36 + x37)
vv = x33
vt = x11*x33
tt = -x15*x40 - x18*x40 - x20*x40 + x40 - 1/x5
D2 = _expand_arrs(mm, ms, mv, mt, ss, sv, st, vv, vt, tt)
return D2
def _d3logpdf(x, m, s, v, t):
x0 = s**(-3)
x1 = s**(-2)
x2 = m - x
x3 = x1*x2**2
x4 = x3 + 1.0
x5 = x4**(-2)
x6 = s**(-1)
x7 = x2*x6
x8 = 6*x7
x9 = 8/x4**3
x10 = x0*x2**3
x11 = t**2
x12 = x11*x7
x13 = x3 + 1
x14 = 3/x13**2
x15 = x12*x14
x16 = np.arcsinh(x7)
x17 = t*x16
x18 = v + x17
x19 = np.sinh(x18)
x20 = x19**2
x21 = np.cosh(x18)
x22 = x21**2
x23 = x13**(-3/2)
x24 = t*x23
x25 = x19*x21
x26 = x24*x25
x27 = x21**(-1)
x28 = x19*x27
x29 = x24*x28
x30 = t**3
x31 = x23*x30
x32 = 4*x25
x33 = x31*x32
x34 = 2*x19
x35 = x27*x34
x36 = x31*x35
x37 = x21**(-3)
x38 = x19**3*x37
x39 = 2*x38
x40 = x31*x39
x41 = 3*t
x42 = x41/x13**(5/2)
x43 = x3*x42
x44 = x22**(-1)
x45 = x20*x44
x46 = x4**(-1)
x47 = x13**(-1)
x48 = 2*x47
x49 = x11*x48
x50 = x2**4/s**4
x51 = x11*x14
x52 = x3*x51
x53 = x23*x7
x54 = x25*x53
x55 = x10*x42
x56 = x28*x53
x57 = x30*x53
x58 = x20*x53
x59 = x22*x53
x60 = x32*x47
x61 = t*x60
x62 = t*x48
x63 = x28*x62
x64 = x38*x62
x65 = x45*x53
x66 = t*x1
x67 = x20*x62
x68 = x22*x62
x69 = x45*x62
x70 = x16*x60
x71 = x16*x49
x72 = x28*x71
x73 = x38*x71
x74 = x2**5/s**5
x75 = 4*x12*x47
x76 = x10*x51
x77 = 2*x21
x78 = 1/np.sqrt(x13)
x79 = x19*x78
x80 = x77*x79
x81 = 2*x78
x82 = x28*x81
x83 = x42*x50
x84 = x23*x3
x85 = x25*x84
x86 = 5*t
x87 = x28*x84
x88 = x30*x84
x89 = x20*x78
x90 = x22*x78
x91 = x20*x84
x92 = x22*x84
x93 = x44*x89
x94 = x61*x7
x95 = x63*x7
x96 = x64*x7
x97 = x45*x84
x98 = x62*x7
x99 = x17*x84
x100 = x67*x7
x101 = x68*x7
x102 = x21*x79
x103 = x27*x79
x104 = x17*x91
x105 = x17*x92
x106 = x69*x7
x107 = x12*x70
x108 = x7*x72
x109 = x17*x97
x110 = x7*x73
x111 = x20*x37
x112 = x111 - x27 - x77
x113 = x6*x81
x114 = x17*x32
x115 = x17*x35
x116 = x17*x38
x117 = 2*x116
x118 = -x20 - x22 - x45 + 1
x119 = x17*x28
x120 = x19*x77
x121 = x120*x17
x122 = x50*x51
x123 = 6*x11*x3*x47
x124 = t*x8
x125 = x42*x74
x126 = 7*x10
x127 = 2*x89
x128 = 2*x90
x129 = 2*x93
x130 = x0*x2
x131 = x20 + x22 + x45 - 1
x132 = x1*x2
x133 = x112*x34
x134 = x16**3
mmm = x0*(-x10*x9 + x15*x20 + x15*x22 + x15*x45 - x15 - x25*x43 + x26 + x28*x43 - x29 - x33 - x36 + x40 + x5*x8)
mms = x0*(x20*x49 - x20*x52 + x22*x49 - x22*x52 + x25*x55 - x28*x55 - 10*x3*x5 + x32*x57 + x35*x57 - x39*x57 - x41*x54 + x41*x56 + x45*x49 - x45*x52 + 2*x46 - x49 + x50*x9 + x52)
mmv = x66*(-x53 + x58 + x59 - x61 - x63 + x64 + x65)
mmt = x1*(-x11*x70 - x17*x53 + x17*x58 + x17*x59 + x17*x65 + x54 - x56 + x62 - x67 - x68 - x69 - x72 + x73)
mss = x0*(-t*x80 + t*x82 + 14*x10*x5 - x20*x75 + x20*x76 - x22*x75 + x22*x76 - x25*x83 + x28*x83 - x32*x88 - x35*x88 + x39*x88 - x45*x75 + x45*x76 - x46*x8 - x74*x9 + x75 - x76 + x85*x86 - x86*x87)
msv = x66*(-x78 + x84 + x89 + x90 - x91 - x92 + x93 + x94 + x95 - x96 - x97)
mst = x1*(x100 + x101 + x102 - x103 - x104 - x105 + x106 + x107 + x108 - x109 - x110 - x17*x78 + x17*x89 + x17*x90 + x17*x93 - x85 + x87 - x98 + x99)
mvv = t*x112*x113*x19
mvt = x6*x78*(-x114 - x115 + x117 + x118)
mtt = x113*x16*(x116 + x118 - x119 - x121)
sss = x0*(x10*x33 + x10*x36 - x10*x40 + x102*x124 - x103*x124 - x122*x20 - x122*x22 - x122*x45 + x122 + x123*x20 + x123*x22 + x123*x45 - x123 + x125*x25 - x125*x28 - x126*x26 + x126*x29 + 12*x3*x46 - 18*x5*x50 - 2 + x2**6*x9/s**6)
ssv = t*x130*(-x127 - x128 - x129 + x81 - x84 + x91 + x92 - x94 - x95 + x96 + x97)
sst = x130*(-x100 - x101 + x104 + x105 - x106 - x107 - x108 + x109 + x110 - x127*x17 - x128*x17 - x129*x17 + x17*x81 - x80 + x82 + x85 - x87 + x98 - x99)
svv = x19*x2*x66*x81*(-x111 + x27 + x77)
svt = x132*x78*(x114 + x115 - x117 + x131)
stt = x132*x16*x81*(-x116 + x119 + x121 + x131)
vvv = x133
vvt = x133*x16
vtt = x133*x16**2
ttt = -2*x120*x134 - 2*x134*x28 + 2*x134*x38 + 2/x30
D3 = _expand_arrs(mmm,mms, mmv, mmt, mss, msv, mst, mvv, mvt, mtt, sss,
ssv, sst, svv, svt, stt, vvv, vvt, vtt, ttt)
return D3
def _d4logpdf(x, m, s, v, t):
x0 = s**(-4)
x1 = s**(-2)
x2 = m - x
x3 = x1*x2**2
x4 = x3 + 1.0
x5 = x4**(-2)
x6 = t**2
x7 = x3 + 1
x8 = x7**(-2)
x9 = 4*x8
x10 = x6*x9
x11 = t**4
x12 = x11*x8
x13 = 2*x12
x14 = 48/x4**4
x15 = x0*x2**4
x16 = x4**(-3)
x17 = x3*x6
x18 = 15/x7**3
x19 = x17*x18
x20 = s**(-1)
x21 = x2*x20
x22 = np.arcsinh(x21)
x23 = t*x22
x24 = v + x23
x25 = np.sinh(x24)
x26 = x25**2
x27 = np.cosh(x24)
x28 = x27**2
x29 = x11*x9
x30 = x26*x29
x31 = x28*x29
x32 = x28**(-1)
x33 = x26*x32
x34 = 8*x33
x35 = x12*x34
x36 = x25**4/x27**4
x37 = 6*x36
x38 = x12*x37
x39 = x25*x27
x40 = t*x39
x41 = x7**(-5/2)
x42 = x21*x41
x43 = 9*x42
x44 = 15/x7**(7/2)
x45 = x2**3
x46 = s**(-3)
x47 = t*x46
x48 = x45*x47
x49 = x39*x48
x50 = x25/x27
x51 = t*x50
x52 = t**3
x53 = 24*x21
x54 = x52*x53
x55 = x39*x41
x56 = x48*x50
x57 = 12*x52
x58 = x42*x57
x59 = x25**3/x27**3
x60 = x2**5/s**5
x61 = x45*x46
x62 = x21*x8
x63 = x6*x62
x64 = 13*x63
x65 = x18*x6
x66 = x61*x65
x67 = x7**(-3/2)
x68 = x39*x67
x69 = 3*t
x70 = x50*x67
x71 = 12*x68
x72 = 6*x52
x73 = x59*x67
x74 = x15*x44
x75 = x3*x41
x76 = 18*x75
x77 = 24*x52
x78 = x55*x77
x79 = x57*x75
x80 = 2*x67
x81 = x6*x80
x82 = 3*x41
x83 = x3*x82
x84 = x26*x67
x85 = x28*x67
x86 = 4*x6
x87 = x84*x86
x88 = x85*x86
x89 = x26*x83
x90 = x28*x83
x91 = x32*x84
x92 = 8*x91
x93 = x6*x92
x94 = 6*x6
x95 = x36*x94
x96 = 12*x40
x97 = 6*t
x98 = x62*x97
x99 = x33*x83
x100 = x23*x67
x101 = x22*x52
x102 = x101*x80
x103 = x23*x84
x104 = x23*x85
x105 = 4*x101
x106 = x6*x71
x107 = x70*x94
x108 = 3*x50
x109 = x108*x41
x110 = x23*x91
x111 = x101*x92
x112 = x101*x37
x113 = 12*x39
x114 = x113*x22
x115 = x22*x94
x116 = x115*x62
x117 = x4**(-1)
x118 = x7**(-1)
x119 = x118*x6
x120 = 6*x119
x121 = x2**6/s**6
x122 = x15*x65
x123 = x17*x8
x124 = 22*x123
x125 = x118*x26
x126 = x118*x28
x127 = x125*x32
x128 = x44*x60
x129 = 27*x41
x130 = x21*x70
x131 = x41*x57
x132 = x131*x50
x133 = x21*x67
x134 = x133*x59
x135 = x59*x61
x136 = x61*x82
x137 = x21*x84
x138 = x21*x85
x139 = x136*x26
x140 = x136*x28
x141 = t*x118
x142 = 8*x39
x143 = x141*x142
x144 = 4*x141
x145 = x144*x50
x146 = x144*x59
x147 = x3*x8
x148 = x21*x91
x149 = x147*x97
x150 = x136*x33
x151 = 3*x21
x152 = x144*x33
x153 = x119*x22
x154 = 4*x153
x155 = x115*x147
x156 = 2*x26
x157 = x141*x156
x158 = 2*x28
x159 = x141*x158
x160 = 3*x36
x161 = x39*x80
x162 = x161*x21
x163 = 2*x1
x164 = t*x163
x165 = 2*x153
x166 = x22*x86
x167 = x127*x6
x168 = 8*x22
x169 = x120*x22*x36
x170 = 4*x23
x171 = x23*x80
x172 = x21*x50
x173 = x21*x59
x174 = x22**2
x175 = x119*x174
x176 = 2*x174*x6
x177 = x118*x23
x178 = 4*x177
x179 = x174*x86
x180 = t*x174
x181 = x2**7/s**7
x182 = 18*x21
x183 = x60*x65
x184 = x6*x8
x185 = x184*x61
x186 = 31*x185
x187 = x125*x21
x188 = 18*x6
x189 = x126*x21
x190 = 1/np.sqrt(x7)
x191 = 6*x190
x192 = x191*x39
x193 = x191*x50
x194 = x121*x44
x195 = 36*x15
x196 = x40*x41
x197 = 27*t
x198 = x3*x68
x199 = x41*x51
x200 = x3*x70
x201 = 36*x3
x202 = x52*x68
x203 = 18*x52
x204 = x131*x59
x205 = x3*x67
x206 = x205*x59
x207 = x15*x82
x208 = x207*x28
x209 = x207*x26
x210 = x3*x81
x211 = x3*x85
x212 = x3*x84
x213 = x3*x88
x214 = x3*x87
x215 = x207*x33
x216 = x3*x91
x217 = x141*x21
x218 = 16*x39
x219 = 8*x217
x220 = x48*x8
x221 = 6*x220
x222 = x221*x59
x223 = x205*x95
x224 = x221*x50
x225 = x17*x92
x226 = x113*x220
x227 = 2*x190
x228 = x227*x28
x229 = x227*x26
x230 = x229*x32
x231 = x227 - x228 - x229 - x230
x232 = x227*x23
x233 = x207*x23
x234 = 5*x3
x235 = x102*x3
x236 = x221*x26
x237 = x221*x28
x238 = x227*x39
x239 = x227*x50
x240 = x209*x23
x241 = x208*x23
x242 = x207*x39
x243 = x105*x212
x244 = x105*x211
x245 = x109*x15
x246 = x17*x71
x247 = x221*x33
x248 = x107*x3
x249 = x206*x94
x250 = x114*x185
x251 = 8*x153
x252 = x215*x23
x253 = x115*x8
x254 = x253*x50*x61
x255 = x135*x253
x256 = x111*x3
x257 = x112*x205
x258 = x157*x21
x259 = x159*x21
x260 = x190*x50
x261 = x190*x59
x262 = x161*x3
x263 = x152*x21
x264 = x160*x217
x265 = x190*x26
x266 = x190*x28
x267 = x165*x21
x268 = x265*x32
x269 = x166*x187
x270 = x166*x189
x271 = x142*x217
x272 = 4*x39
x273 = x190*x272
x274 = x145*x21
x275 = x146*x21
x276 = x170*x198
x277 = x171*x3
x278 = x277*x50
x279 = x277*x59
x280 = x127*x21
x281 = x168*x280*x6
x282 = x169*x21
x283 = x118*x21
x284 = x205*x22
x285 = x175*x21
x286 = x212*x22
x287 = x211*x22
x288 = x176*x187
x289 = x176*x189
x290 = x142*x23
x291 = x283*x290
x292 = x170*x283
x293 = x292*x50
x294 = x180*x262
x295 = x292*x59
x296 = x216*x22
x297 = x180*x200
x298 = x180*x206
x299 = x179*x280
x300 = x160*x285
x301 = 4*x33
x302 = -x156 - x158 - x160 + x301 - 1
x303 = x20*x227
x304 = 2*x39
x305 = x158*x23
x306 = x156*x23
x307 = x160*x23
x308 = x23*x301
x309 = -x23 - x305 - x306 - x307 + x308
x310 = 2*x50
x311 = 2*x59
x312 = x22*x227
x313 = 6*x39
x314 = 3*x59
x315 = x121*x65
x316 = 40*x15*x184
x317 = 36*x17
x318 = x181*x44
x319 = 45*x60
x320 = 48*x48
x321 = x61*x77
x322 = 6*x265
x323 = 6*x266
x324 = 6*x268
x325 = x39*x53
x326 = 12*x217
x327 = x0*x2
x328 = 7*x3
x329 = 12*x153
x330 = x227*x59
x331 = x2*x46
x332 = x1*x2
x333 = x227*x332
x334 = x23 + x305 + x306 + x307 - x308
x335 = 2*x302
x336 = x22**4
mmmm = x0*(x10*x26 + x10*x28 + x10*x33 - x10 - x13 + x14*x15 - 48*x16*x3 - x19*x26 - x19*x28 - x19*x33 + x19 - x30 - x31 + x35 - x38 - x40*x43 + x43*x51 + x44*x49 - x44*x56 + 6*x5 + x50*x58 + x54*x55 - x58*x59)
mmms = x0*(x13*x21 - x14*x60 + 72*x16*x61 + x21*x30 + x21*x31 - x21*x35 + x21*x38 - x26*x64 + x26*x66 - x28*x64 + x28*x66 - x3*x78 - x33*x64 + x33*x66 - x40*x74 + x40*x76 - x5*x53 - x50*x79 + x51*x74 - x51*x76 + x52*x71 + x59*x79 + x64 - x66 - x68*x69 + x69*x70 + x70*x72 - x72*x73)
mmmv = x47*(x50*x98 - x59*x98 + x62*x96 - x67*x95 - x67 - x81 + x83 + x84 + x85 - x87 - x88 - x89 - x90 + x91 + x93 - x99)
mmmt = x46*(-x100 - x102 + x103 + x104 - x105*x84 - x105*x85 - x106 - x107 + x109*x3 + x110 + x111 - x112*x67 + x114*x63 + x116*x50 - x116*x59 + x23*x83 - x23*x89 - x23*x90 - x23*x99 + x26*x98 + x28*x98 + x33*x98 - x39*x83 + x68 - x70 + x73*x94 - x98)
mmss = x0*(-12*t*x130 + t*x21*x71 - 6*x117 + x120 + x121*x14 - x122*x26 - x122*x28 - x122*x33 + x122 + x124*x26 + x124*x28 + x124*x33 - x124 - x125*x94 - x126*x94 - x127*x94 + x128*x40 - x128*x51 - x129*x49 + x129*x56 - x13*x3 - x130*x57 - x131*x135 + x132*x61 + x134*x57 - 96*x15*x16 - x3*x30 - x3*x31 + x3*x35 - x3*x38 + 54*x3*x5 - x54*x68 + x61*x78)
mmsv = x47*(x133*x95 + 3*x133 - x136 - 3*x137 - 3*x138 + x139 + x140 + x143 + x145 - x146 - x147*x96 - 3*x148 - x149*x50 + x149*x59 + x150 + x21*x81 + x21*x87 + x21*x88 - x21*x93)
mmst = x46*(x100*x151 + x102*x21 - x103*x151 - x104*x151 + x105*x137 + x105*x138 + x106*x21 + x107*x21 - x109*x61 - x110*x151 - x111*x21 + x112*x133 - x114*x123 + 3*x130 - x134*x94 - x136*x23 + x136*x39 + x139*x23 + x140*x23 + x142*x153 + x144*x26 + x144*x28 - x144 - x149*x26 - x149*x28 - x149*x33 + x149 + x150*x23 - x151*x68 + x152 + x154*x50 - x154*x59 - x155*x50 + x155*x59)
mmvv = x164*(x130 - x134 - x141*x160 - x141 + x152 - x157 - x159 + x162)
mmvt = x1*(-x125*x166 - x126*x166 - x133 + x137 + x138 - x143 - x145 + x146 + x148 - x165 + x167*x168 - x169 + x170*x21*x68 + x171*x172 - x171*x173)
mmtt = x163*(x118 - x125*x176 - x125 - x126*x176 - x126 + x127*x179 - x127 + x130*x180 - x133*x22 - x134*x180 + x137*x22 + x138*x22 - x142*x177 + x148*x22 - x160*x175 + x162*x180 - x175 - x178*x50 + x178*x59)
msss = x0*(t*x192 - t*x193 + x117*x53 - x119*x182 + x13*x61 - x132*x15 - x14*x181 + x15*x204 - x15*x78 + 120*x16*x60 + x167*x182 + x183*x26 + x183*x28 + x183*x33 - x183 - x186*x26 - x186*x28 - x186*x33 + x186 + x187*x188 + x188*x189 - x194*x40 + x194*x51 + x195*x196 - x195*x199 - x197*x198 + x197*x200 + x200*x203 + x201*x202 - x203*x206 + x30*x61 + x31*x61 - x35*x61 + x38*x61 - 96*x5*x61)
mssv = x47*(-5*x205 + x207 - x208 - x209 - x210 + 5*x211 + 5*x212 - x213 - x214 - x215 + 5*x216 - x217*x218 - x219*x50 + x219*x59 - x222 - x223 + x224 + x225 + x226 + x231)
msst = x46*(-x100*x234 + x103*x234 + x104*x234 + x110*x234 - x153*x21*x218 - x172*x251 + x173*x251 - 5*x200 - x217*x34 - x219*x26 - x219*x28 + x219 - x221 - x228*x23 - x229*x23 - x23*x230 + x232 + x233 + x234*x68 - x235 + x236 + x237 - x238 + x239 - x240 - x241 - x242 - x243 - x244 + x245 - x246 + x247 - x248 + x249 + x250 - x252 + x254 - x255 + x256 - x257)
msvv = x164*(-x200 + x206 + x217 + x238 + x258 + x259 + x260 - x261 - x262 - x263 + x264)
msvt = x1*(-x190 + x205 - x211 - x212 - x216 + x23*x273 + x232*x50 - x232*x59 + x265 + x266 + x267 + x268 + x269 + x270 + x271 + x274 - x275 - x276 - x278 + x279 - x281 + x282)
mstt = x163*(x180*x238 + x180*x260 - x180*x261 + x187 + x189 - x190*x22 + x22*x265 + x22*x266 + x22*x268 + x280 - x283 + x284 + x285 - x286 - x287 + x288 + x289 + x291 + x293 - x294 - x295 - x296 - x297 + x298 - x299 + x300)
mvvv = t*x302*x303
mvvt = x303*(-x304 + x309 - x50 + x59)
mvtt = x20*x312*(-x272 + x309 - x310 + x311)
mttt = x174*x303*(-x108 + x309 - x313 + x314)
ssss = x0*(t*x260*x53 - 60*x117*x3 + x119*x201 - 144*x121*x16 - x125*x317 - x126*x317 - x127*x317 - x13*x15 + x132*x60 - x15*x30 - x15*x31 + x15*x35 - x15*x38 + 150*x15*x5 - x190*x40*x53 - x196*x319 + x199*x319 - 48*x202*x61 - x204*x60 - x26*x315 + x26*x316 - x28*x315 + x28*x316 - x315*x33 + x315 + x316*x33 - x316 + x318*x40 - x318*x51 + x320*x68 - x320*x70 - x321*x70 + x321*x73 + x60*x78 + 6 + x14*x2**8/s**8)
sssv = t*x327*(x141*x325 - x191 + 7*x205 - x207 + x208 + x209 + x210 - 7*x211 - 7*x212 + x213 + x214 + x215 - 7*x216 + x222 + x223 - x224 - x225 - x226 + x322 + x323 + x324 + x326*x50 - x326*x59)
ssst = x327*(x100*x328 - x103*x328 - x104*x328 - x110*x328 + x153*x325 + x172*x329 - x173*x329 - x191*x23 + x192 - x193 + 7*x200 + x221 + x23*x322 + x23*x323 + x23*x324 - x233 + x235 - x236 - x237 + x240 + x241 + x242 + x243 + x244 - x245 + x246 - x247 + x248 - x249 - x250 + x252 - x254 + x255 - x256 + x257 + x26*x326 + x28*x326 + x326*x33 - x326 - x328*x68)
ssvv = 2*x2*x47*(x200 - x206 - x217 - x239 - x258 - x259 + x262 + x263 - x264 - x273 + x330)
ssvt = x331*(-x170*x260 + x170*x261 - x190*x290 - x205 + x211 + x212 + x216 + x231 - x267 - x269 - x270 - x271 - x274 + x275 + x276 + x278 - x279 + x281 - x282)
sstt = 2*x331*(-x180*x239 - x180*x273 + x180*x330 - x187 - x189 - x22*x228 - x22*x229 - x22*x230 - x280 + x283 - x284 - x285 + x286 + x287 - x288 - x289 - x291 - x293 + x294 + x295 + x296 + x297 - x298 + x299 - x300 + x312)
svvv = t*x333*(x156 + x158 + x160 - x301 + 1)
svvt = x333*(x304 + x334 + x50 - x59)
svtt = x312*x332*(x272 + x310 - x311 + x334)
sttt = x174*x333*(x108 + x313 - x314 + x334)
vvvv = 2*x302
vvvt = x22*x335
vvtt = x174*x335
vttt = x22**3*x335
tttt = -2*x156*x336 - 2*x158*x336 - 2*x160*x336 + 2*x301*x336 - 2*x336 - 6/x11
D4 = _expand_arrs(mmmm, mmms, mmmv, mmmt, mmss, mmsv, mmst, mmvv, mmvt,
mmtt, msss, mssv, msst, msvv, msvt, mstt, mvvv, mvvt,
mvtt, mttt, ssss, sssv, ssst, ssvv, ssvt, sstt, svvv,
svvt, svtt, sttt, vvvv, vvvt, vvtt, vttt, tttt)
return D4
def _pdf(x, m=0.0, s=1.0, v=0.0, t=1.0):
return np.exp(_logpdf(x, m=m, s=s, v=v, t=t))
def _cdf(x, m=0.0, s=1.0, v=0.0, t=1.0):
z = (x - m) / s
r = np.sinh(t * np.arcsinh(z) - v)
return sp.special.ndtr(r)
def _qtf(q, m=0.0, s=1.0, v=0.0, t=1.0):
y = sp.special.ndtri(q)
z = np.sinh((np.arcsinh(y) + v) / t)
x = s * z + m
return x
def _rvs(random_state, m=0.0, s=1.0, v=0.0, t=1.0, size=None):
u = random_state.uniform(low=0.0, high=1.0, size=size)
y = _qtf(u, m=m, s=s, v=v, t=t)
return y
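# Minimal self-check sketch (not part of the original module): draw samples from the
# sinh-arcsinh distribution with _rvs and verify that _qtf inverts _cdf; the parameter
# values below are arbitrary illustrations.
if __name__ == "__main__":
    rng = np.random.default_rng(0)  # any object with a .uniform method works here
    y = _rvs(rng, m=1.0, s=2.0, v=0.3, t=1.2, size=5)
    q = _cdf(y, m=1.0, s=2.0, v=0.3, t=1.2)
    print(np.allclose(_qtf(q, m=1.0, s=2.0, v=0.3, t=1.2), y))  # expected: True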
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Advent of Code 2020
# https://github.com/scorphus/advent-of-code-2020
# Licensed under the BSD-3-Clause license:
# https://opensource.org/licenses/BSD-3-Clause
# Copyright (c) 2020, Pablo S. Blum de Aguiar <[email protected]>
from aoc import strip
# fmt: off
DIRS = {
"e": ( 1, 0), # noqa
"w": (-1, 0), # noqa
"nw": (-1, 1), # noqa
"se": ( 1, -1), # noqa
"ne": ( 0, 1), # noqa
"sw": ( 0, -1), # noqa
}
# fmt: on
def part1(lines):
return sum(read(lines).values())
def part2(lines, days=100):
tiles = set(p for p, c in read(lines).items() if c)
for _ in range(days):
tiles = flip(tiles)
return len(tiles)
def read(lines):
tiles = {}
for line in strip(lines):
line = line.rstrip("\n")
x, y = 0, 0
while line:
for dir_, (dx, dy) in DIRS.items():
if line.startswith(dir_):
x += dx
y += dy
line = line[len(dir_) :]
continue
tiles[x, y] = not tiles.get((x, y), False)
return tiles
def flip(tiles):
adjacent, next_tiles = {}, set()
for tile in tiles:
for deltas in DIRS.values():
adj = tuple(c + d for c, d in zip(tile, deltas))
adjacent[adj] = adjacent.get(adj, 0) + 1
for tile, adj in adjacent.items():
if tile not in tiles and adj == 2 or tile in tiles and 0 < adj < 3:
next_tiles.add(tile)
return next_tiles
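# Tiny sketch (not part of the original solution): a single isolated black tile has
# zero black neighbours, so one flip() round turns it white again.
if __name__ == "__main__":
    print(flip({(0, 0)}))  # expected: set()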
|
TEXT = 'AACAAGCTGATAAACATTTAAAGAG'
PATTERN = 'AAAAA'
D = 2
def print_num_list_as_str(numlist, glue=' '):
print(glue.join(str(x) for x in numlist))
def hamming_distance(text1, text2):
if len(text1) != len(text2):
return -1
return sum([0 if text1[i] == text2[i] else 1 for i in range(len(text1))])
def count_approx_kmers(text, pattern, d):
k = len(pattern)
return sum([1 for i in range(len(text)-k+1) if hamming_distance(text[i:i+k], pattern) <= d])
ans = count_approx_kmers(TEXT, PATTERN, D)
print(ans)
|
#
# Copyright (C) 2019 Luca Pasqualini
# University of Siena - Artificial Intelligence Laboratory - SAILab
#
# Inspired by the work of David Johnston (C) 2017: https://github.com/dj-on-github/sp800_22_tests
#
# NistRng is licensed under a BSD 3-Clause.
#
# You should have received a copy of the license along with this
# work. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# Import packages
import numpy
import math
import sys
# Import required src
from nistrng import Test, Result
class DiscreteFourierTransformTest(Test):
"""
Discrete Fourier transform (spectral) test as described in NIST paper: https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-22r1a.pdf
The focus of this test is the peak heights in the Discrete Fourier Transform of the sequence.
The purpose of this test is to detect periodic features (i.e., repetitive patterns that are near each other) in the
tested sequence that would indicate a deviation from the assumption of randomness.
The intention is to detect whether the number of peaks exceeding the 95% threshold is significantly different than 5%.
The significance value of the test is 0.01.
"""
def __init__(self):
# Generate base Test class
super(DiscreteFourierTransformTest, self).__init__("Discrete Fourier Transform", 0.01)
def _execute(self,
bits: numpy.ndarray) -> Result:
"""
Overridden method of Test class: check its docstring for further information.
"""
# Make sure the sequence is even in length
bits_copy: numpy.ndarray = bits.copy()
if (bits_copy.size % 2) == 1:
bits_copy = bits_copy[:-1]
# Convert all the zeros in the array to -1
bits_copy[bits_copy == 0] = -1
# Compute DFT
discrete_fourier_transform = numpy.fft.fft(bits_copy)
        # Compute magnitudes of the first half of the sequence, depending on the Python version
if sys.version_info > (3, 0):
magnitudes = abs(discrete_fourier_transform)[:bits_copy.size // 2]
else:
magnitudes = abs(discrete_fourier_transform)[:bits_copy.size / 2]
# Compute upper threshold
threshold: float = math.sqrt(math.log(1.0 / 0.05) * bits_copy.size)
        # Compute the expected number of peaks below the threshold (N0)
        expected_peaks: float = 0.95 * bits_copy.size / 2.0
        # Count the observed peaks below the threshold (N1)
        counted_peaks: float = float(len(magnitudes[magnitudes < threshold]))
# Compute the score (P-value) using the normalized difference
normalized_difference: float = (counted_peaks - expected_peaks) / math.sqrt((bits_copy.size * 0.95 * 0.05) / 4)
score: float = math.erfc(abs(normalized_difference) / math.sqrt(2))
# Return result
if score >= self.significance_value:
return Result(self.name, True, numpy.array(score))
return Result(self.name, False, numpy.array(score))
def is_eligible(self,
bits: numpy.ndarray) -> bool:
"""
Overridden method of Test class: check its docstring for further information.
"""
# This test is always eligible for any sequence
return True
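if __name__ == "__main__":
    # Usage sketch (not part of the original module): run the test directly on a
    # pseudo-random binary sequence. Only the _execute method defined above is used,
    # since the packing helpers of the wider nistrng package are not shown here.
    bits: numpy.ndarray = numpy.random.randint(0, 2, size=1024)
    print(DiscreteFourierTransformTest()._execute(bits))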
|
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="bopt",
version="0.1.0",
author="Jakub Arnold",
author_email="[email protected]",
description="Bayesian Optimization",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/darthdeus/bopt",
packages=setuptools.find_packages(),
package_data={
"": ["LICENSE"],
"bopt": ["templates/*"]
},
python_requires=">=3.6, <4",
install_requires=[
"numpy>=1.17.4",
"scipy>=1.1.0",
"pyyaml>=5.1",
"tqdm~=4.39.0",
"flask~=1.0.2",
"psutil~=5.6.3",
"jsonpickle~=1.0",
"GPy[plotting]~=1.9.9",
"filelock~=3.0.10",
"ipdb~=0.11",
"livereload==2.5.1",
"colored==1.4.2"
],
extras_require={
"plotting": [
"matplotlib~=3.2.0",
],
"dev": [
"pylint",
"coverage",
"deepdiff",
"Cython",
"pytest",
"mypy",
"radon",
]
},
entry_points={
"console_scripts": [
"bopt=bopt.cli.cli:main"
]
},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
|
from typing import Dict, Tuple
import os
import numpy
def _empty_cmap() -> Dict:
return {i: [0, 0, 0, 0] for i in range(256)}
def _update_alpha(cmap: Dict, idx: Tuple[int], alpha: int = 0) -> None:
if not isinstance(idx, tuple):
idx = tuple((idx,))
for i in idx:
        cmap[i] = cmap[i][0:3] + [alpha]  # colormap values are stored as lists
def _remove_value(cmap: Dict, idx: Tuple[int]) -> None:
if not isinstance(idx, tuple):
idx = tuple((idx,))
for i in idx:
cmap.pop(i, None)
def _update_cmap(cmap: Dict, values: Dict) -> None:
for i, color in values.items():
if len(color) == 3:
color += [255]
cmap[i] = color
def get_colormap(name: str) -> Dict:
"""
Return colormap dict.
Attributes
----------
    name : str
        Colormap name (e.g. "cfastie").
Returns
-------
colormap : dict
GDAL RGBA Color Table dictionary.
"""
cmap_file = os.path.join(os.path.dirname(__file__), "cmaps", f"{name}.npy")
cmap = numpy.load(cmap_file)
return {idx: value.tolist() for idx, value in enumerate(cmap)}
# From https://github.com/mojodna/marblecutter/blob/5b9040ba6c83562a465eabdbb6e8959e6a8bf041/marblecutter/utils.py#L35
def make_lut(colormap: Dict) -> numpy.ndarray:
    """
    Create a lookup table numpy.ndarray from a GDAL RGBA Color Table dictionary.
    Attributes
    ----------
    colormap : dict
        GDAL RGBA Color Table dictionary.
    Returns
    -------
    lut : numpy.ndarray
        Color lookup table of shape (256, 4) and dtype uint8.
    """
lut = numpy.zeros(shape=(256, 4), dtype=numpy.uint8)
for i, color in colormap.items():
lut[int(i)] = color
return lut
def apply_cmap(data: numpy.ndarray, cmap: Dict) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Apply colormap on tile data.
Attributes
----------
colormap : dict
GDAL RGBA Color Table dictionary.
Returns
-------
lut : numpy.array
returns a JSON like object with the metadata.
"""
if data.shape[0] > 1:
raise Exception("Source data must be 1 band")
lookup_table = make_lut(cmap)
# apply the color map
data = lookup_table[data[0], :]
data = numpy.transpose(data, [2, 0, 1])
return data[:-1], data[-1]
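if __name__ == "__main__":
    # Usage sketch (not part of the original module): build a small colormap with the
    # private helpers above and apply it to dummy single-band data, instead of loading
    # one of the packaged .npy colormaps via get_colormap().
    cmap = _empty_cmap()
    _update_cmap(cmap, {0: [0, 0, 0], 255: [255, 255, 255]})
    tile = numpy.zeros((1, 4, 4), dtype=numpy.uint8)
    tile[0, 2:, 2:] = 255
    rgb, alpha = apply_cmap(tile, cmap)
    print(rgb.shape, alpha.shape)  # (3, 4, 4) (4, 4)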
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/10/12 15:23
# @Author : Dong Wang
# @FileName: STR_search.py
# @Software: STRsearch
# @github :https://github.com/AnJingwd/STRsearch
from __future__ import division
import os,re
import numpy as np
from functools import reduce
from multiprocessing import Pool as Pool
def STR_nomenclature_trans(period,STR_Repeat_Motif):
'''
    Transform the STR repeat motif into a list of STR units;
    if the strand is "-", take the reverse complement of the STR unit list,
    and record which units need to be counted for the STR allele using a list.
input: like [TCTA]1TCTG[TCTA]14
output: like (['TCTA', 'TCTG', 'TCTA'], [1, 0, 1])
'''
STR_units = re.findall(r"([a-zA-Z]+)", re.sub("n","",STR_Repeat_Motif))
repetitive_motifs = [1 if re.findall(r'[[](.*?)[]]',i) else 0 for i in STR_Repeat_Motif.split(" ")]
STR_numbers = []
non_period_unit_len = [len(m) for m in re.findall(r"([A-Z]+)", STR_Repeat_Motif) if len(m) < period]
if sum(non_period_unit_len) == period:
weight = 1/len(non_period_unit_len)
elif sum(non_period_unit_len) > period:
weight = (sum(non_period_unit_len) // period + sum(non_period_unit_len) % period * 0.1) / len(
non_period_unit_len)
else:
weight = 0.1*sum(non_period_unit_len)
for i in STR_units:
if re.match(r"[a-z]+", i):
STR_numbers.append(0)
else:
if len(i) < period:
STR_numbers.append(weight)
else:
STR_numbers.append(1)
STR_units_upper = [j.upper() for j in STR_units]
return STR_units_upper, repetitive_motifs,STR_numbers
def find_lcseque(seq,STR_unit):
'''
    Search for a STR unit in a read sequence using the longest common subsequence algorithm
input:
seq: a sequence
STR_unit: the sequence of a STR unit
output:
local_max_list:local max score list
p_list: position list
'''
m = np.zeros([len(seq) + 1,len(STR_unit) + 1])
local_max = 0
p = 0
local_max_list = []
p_list = []
for i in range(len(seq)):
for j in range(len(STR_unit)):
if seq[i] == STR_unit[j]:
m[i + 1][j + 1] = m[i][j] + 1
if m[i + 1][j + 1] == len(STR_unit):
mmax = m[i + 1][j + 1]
p = i + 1
local_max_list.append(int(mmax))
p_list.append(int(p))
return (local_max_list,p_list)
def match_flank(seq,flank):
'''
    Match the flanking sequence against the read by sliding comparison, and output the location with the fewest mismatches
    input:
        seq: read sequence
        flank: flank sequence of 15 bp
    output: the start/end location with the fewest mismatches and the number of mismatches
'''
length = len(flank)
resultMissmatchCount = length
seqdict = {}
for index, s in enumerate(seq[:-length]):
missmatch = 0
for j, k in zip(seq[index:index + length], flank): # [(s1[0],s2[0]),(s1[1],s2[1]),...]
if j != k:
missmatch += 1
if missmatch <= resultMissmatchCount:
seqdict[missmatch] = seq[index:index + length]
resultMissmatchCount = missmatch
minkey = min(seqdict.keys())
result = seqdict[minkey]
start,end = seq.index(result),seq.index(result)+length
return start,end,resultMissmatchCount
def merge_intervals(intervals):
    '''
    Merge overlapping intervals in a list
    input: like [[1,3],[2,6],[8,10],[15,18]]
    output: like [[1,6],[8,10],[15,18]]
    '''
intervals.sort(key=lambda x: x[0])
merged = []
for interval in intervals:
if not merged or merged[-1][-1] < interval[0]:
merged.append(interval)
else:
merged[-1][-1] = max(merged[-1][-1], interval[-1])
return merged
def get_STR_unit_region(score_list,pos_list,start_point = 0):
    '''
    Compute the intervals covered by an STR unit and merge them into a union
    input: local max score list, position list, and the start offset
    output: list of [start, end) intervals (merged, except for 2 bp units that may
    be part of longer units)
    '''
intervals_list = []
for i in range(0,len(score_list)):
start = pos_list[i]-score_list[i] + start_point
end = pos_list[i] + start_point
intervals_list.append([start,end])
if score_list == []:
return []
    elif score_list[0]==2: ## when a unit is part of another unit, don't merge intervals
return intervals_list
else:
intervals_union = merge_intervals(intervals_list) ## get union intervals of STR units
return intervals_union
def list_depth(items):
'''
Calculate the depth of nested lists
input: list
output: max depth of list
'''
max_depth = 1 if isinstance(items, list) else 0
if max_depth:
for item in items:
if isinstance(item, list):
max_depth = max(max_depth, list_depth(item) + 1)
else:
return max_depth
return max_depth
def get_interval_max(interval_list):
if list_depth(interval_list) == 1: ## like [1,4]
return interval_list
else:
if len(interval_list)==1: # depth=2 and len = 1 like [[-1,-1,-1,-1]]
return interval_list[0]
else: ## depth=2 and len > 1 like [[-1,-1,-1,-1],[-1,-1,3,10]]
max_length = 0
max_index = 0
for i in range(0, len(interval_list)):
interval_new = [j for j in interval_list[i] if j != -1]
if interval_new == []:
pass
else:
(start, end) = min(interval_new), max(interval_new)
length = end - start
if length > max_length:
max_length = length
max_index = i
return interval_list[max_index]
def get_continue_region(region1,region2):
    ## all regions are half-open: [start, end)
if region1==region2 == []:
return [[-1,-1,-1,-1]]
elif region1== [] or region2 == []:
return [[-1,-1]+[k[0],k[1]] for k in region2] if region1== [] else [get_interval_max(region1)+[-1, -1]]
else:
continue_region = []
for i in range(0,len(region1)):
for j in range(0,len(region2)):
if region1[i][-1] == region2[j][0]:
region = [k for k in region1[i]]+[m for m in region2[j]]
continue_region.append(region)
elif region1[i][-1] > region2[j][0] and region1[i][-1] < region2[j][1]:
region = [k for k in region1[i]]+[region1[i][-1],region2[j][1]]
continue_region.append(region)
else:
pass
continue_region.append(get_interval_max(region1) +[-1,-1])
continue_region +=[[-1,-1]*(int(len(region1[0])/2))+[k[0],k[1]] for k in region2]
return continue_region
def get_STR_seq(max_order_region, STR_units_upper,repetitive_motifs,STR_numbers,seq,period,flank_left_region,flank_right_region):
mySTR_start,mySTR_end = int(min([i for i in max_order_region if i!=-1])),int(max(max_order_region))
start_index, end_index = int(max_order_region.index(mySTR_start)/2),int(max_order_region.index(mySTR_end)/2)
max_order_region_len = [max_order_region[i+1]- max_order_region[i] for i in range(0,len(max_order_region),2)]
max_order_region_len_sub = max_order_region_len[start_index:end_index+1]
STR_units_sub = STR_units_upper[start_index:end_index+1]
STR_units_sub_len = [len(j) for j in STR_units_sub]
repetitive_motifs_sub = repetitive_motifs[start_index:end_index+1]
STR_numbers_sub = STR_numbers[start_index:end_index+1]
STR_unit_rep_count = np.array(max_order_region_len_sub)/np.array(STR_units_sub_len)*np.array(STR_numbers_sub)
STR_unit_rep_count_new = [int(STR_unit_rep_count[i]) if repetitive_motifs_sub[i]==1 else STR_unit_rep_count[i] for i in range(len(repetitive_motifs_sub))]
rep_num = sum(STR_unit_rep_count_new)
STR_to_flank_distance = len(seq[flank_left_region[1]:mySTR_start]) + len(seq[mySTR_end:flank_right_region[0]])
allele_sum = rep_num+(STR_to_flank_distance)//period+(STR_to_flank_distance)%period*0.1
allele = int(allele_sum) if (allele_sum-int(allele_sum))<0.1 else allele_sum
STR_result_pat = ""
for k in range(0,len(STR_units_sub)):
if STR_numbers_sub[k] == 1:
if repetitive_motifs_sub[k]==1:
unit = "[" + STR_units_sub[k] + "]"+str(STR_unit_rep_count_new[k])
else:
unit = STR_units_sub[k]
elif STR_numbers_sub[k] == 0:
unit = STR_units_sub[k].lower()
else:
unit = STR_units_sub[k]
STR_result_pat +=unit+" "
STR_result_pat_full = seq[flank_left_region[1]:mySTR_start].upper()+" " + STR_result_pat +seq[mySTR_end:flank_right_region[0]].upper()
flank_mismatchs = str(flank_left_region[2])+","+str(flank_right_region[2])
distances = str(mySTR_start)+","+str(len(seq)-mySTR_end)
return STR_result_pat_full,allele,max_order_region_len,rep_num,flank_mismatchs,distances
def STR_search(STR_Repeat_Motif,period,seq,flank_seq_left,flank_seq_right):
flank_left_region,flank_right_region = match_flank(seq,flank_seq_left),match_flank(seq,flank_seq_right)
STR_units_upper, repetitive_motifs,STR_numbers= STR_nomenclature_trans(period,STR_Repeat_Motif)
region_list_all = []
for STR_unit in STR_units_upper:
(local_max_score_list,p_list_left) = find_lcseque(seq,STR_unit)
region_list = get_STR_unit_region(local_max_score_list, p_list_left, 0)
region_list_all.append(region_list)
region_list_all_not_empty = [i for i in region_list_all if i !=[]]
if len(region_list_all_not_empty)==0: # if all units mismatched, the read will be skipped
return -1
else:
max_order_region_list = reduce(get_continue_region,region_list_all)
max_order_region_list_new = [j for j in max_order_region_list if len(set(j))!=1]
max_order_region= get_interval_max(max_order_region_list_new)
(STR_result_pat_full,allele,max_order_region_len,rep_num,flank_mismatchs,distances) = get_STR_seq(max_order_region, STR_units_upper,repetitive_motifs,STR_numbers, seq,period,flank_left_region,flank_right_region)
if sum(list(map(lambda a,b:a*b,repetitive_motifs,max_order_region_len))) ==0:
return -1
else:
return (STR_result_pat_full,allele,rep_num,flank_mismatchs,distances)
def STR_search_one_locus(args_list):
marker_name,STR_Repeat_Motif,period,STR_fastq_file,result_file,flank_seq_left,flank_seq_right= args_list
results = open(result_file,"w")
results.write("STR_sequence_structure\tAllele\tFlank_mismatchs(5',3')\tDistance_to_reads_ends(5',3')\n")
N_line = 0
with open(STR_fastq_file,"r") as f1:
for line in f1:
N_line+=1
if N_line % 4 != 2:
pass
else:
myseq = line.strip()
res =STR_search(STR_Repeat_Motif,period,myseq,flank_seq_left,flank_seq_right)
if res == -1:
continue
else:
(STR_result_pat_full,allele,rep_num,flank_mismatchs,distances) =res
if rep_num<=1:
pass
else:
results.write(str(STR_result_pat_full)+"\t"+ str(allele) +"\t"+flank_mismatchs+ "\t"+distances+"\n")
results.close()
print("{} has been decoded completely! ".format(marker_name))
def count_reads(fq_file):
'''
count number of reads from fastq file
'''
count = 0
with open(fq_file,"r") as f1:
for line in f1:
count+=1
return int(count/4)
################### STR genotyping for loci listed in the bed file, one by one #####################
def main(working_path,sample,sex,fastq_dir,ref_bed,reads_threshold,num_processors):
## create STR genotyping path
STR_results_dir=os.path.join(working_path,"STRsearch")
if not os.path.exists(STR_results_dir):
os.makedirs(STR_results_dir)
info_list = []
bed = open(ref_bed,"r")
next(bed)
N = 0
for line in bed:
line = line.strip()
content_list = line.split("\t")
chr = content_list[0]
period = int(content_list[3])
marker_name = content_list[5]
STR_Repeat_Motif = content_list[7]
stand = content_list[-3]
flank_seq_left,flank_seq_right = content_list[-2],content_list[-1]
STR_fastq_merge_file = os.path.join(fastq_dir, marker_name +"_reads_"+sample+"_merge.fastq")
result_file = os.path.join(STR_results_dir,marker_name+"_results_{}.txt".format(sample))
        if count_reads(STR_fastq_merge_file)<= int(reads_threshold): # if the fastq has no more reads than the threshold, skip genotyping and create an empty result file
os.system("touch {0}".format(result_file))
print("{} has been decoded completely!".format(marker_name))
else:
if sex =="female" and chr == "chrY":
os.system("touch {0}".format(result_file))
print("{} has been decoded completely!".format(marker_name))
else:
info_list.append([marker_name,STR_Repeat_Motif,period,STR_fastq_merge_file,result_file,flank_seq_left,flank_seq_right])
N+=1
bed.close()
    pool = Pool(int(num_processors))
pool.imap(STR_search_one_locus, info_list)
pool.close()
pool.join()
    print('Finished searching for a total of {} STR loci'.format(N))
if __name__=='__main__':
sys.exit(main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6],sys.argv[7]))
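# Assumed command-line invocation (argument order follows main() above; paths and
# values are placeholders, not taken from the source):
#   python STR_search.py <working_path> <sample> <sex> <fastq_dir> <ref_bed> <reads_threshold> <num_processors>
#   python STR_search.py /data/run1 sampleA female /data/run1/fastq STR_panel.bed 30 4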
|
from inspect import isclass, signature
from .AuthorizationResponse import AuthorizationResponse
from ..exceptions.exceptions import GateDoesNotExist, PolicyDoesNotExist
class Gate:
    def __init__(
        self,
        application,
        user_callback=None,
        policies=None,
        permissions=None,
        before_callbacks=None,
        after_callbacks=None,
    ):
        # Avoid shared mutable default arguments across Gate instances.
        self.application = application
        self.user_callback = user_callback
        self.policies = policies if policies is not None else {}
        self.permissions = permissions if permissions is not None else {}
        self.before_callbacks = before_callbacks if before_callbacks is not None else []
        self.after_callbacks = after_callbacks if after_callbacks is not None else []
def define(self, permission, condition):
if not callable(condition):
raise Exception(f"Permission {permission} should be given a callable.")
self.permissions.update({permission: condition})
def register_policies(self, policies):
for model_class, policy_class in policies:
self.policies[model_class] = policy_class
return self
def get_policy_for(self, instance_or_class):
if isinstance(instance_or_class, str):
# TODO: load model from str, get class and get policies
policy = None
elif isclass(instance_or_class):
policy = self.policies.get(instance_or_class, None)
else:
policy = self.policies.get(instance_or_class.__class__, None)
if policy:
return policy()
else:
return None
def before(self, before_callback):
if not callable(before_callback):
raise Exception("before() should be given a callable.")
self.before_callbacks.append(before_callback)
def after(self, after_callback):
if not callable(after_callback):
raise Exception("before() should be given a callable.")
self.after_callbacks.append(after_callback)
def allows(self, permission, *args):
return self.inspect(permission, *args).allowed()
def denies(self, permission, *args):
return not self.inspect(permission, *args).allowed()
def has(self, permission):
return permission in self.permissions
def for_user(self, user):
return Gate(
self.application,
lambda: user,
self.policies,
self.permissions,
self.before_callbacks,
self.after_callbacks,
)
def any(self, permissions, *args):
"""Check that every of those permissions are allowed."""
for permission in permissions:
if self.denies(permission, *args):
return False
return True
def none(self, permissions, *args):
"""Check that none of those permissions are allowed."""
for permission in permissions:
if self.allows(permission, *args):
return False
return True
def authorize(self, permission, *args):
return self.inspect(permission, *args).authorize()
def inspect(self, permission, *args):
"""Get permission checks results for the given user then builds and returns an
authorization response."""
boolean_result = self.check(permission, *args)
if isinstance(boolean_result, AuthorizationResponse):
return boolean_result
if boolean_result:
return AuthorizationResponse.allow()
else:
return AuthorizationResponse.deny()
def check(self, permission, *args):
"""The core of the authorization class. Run before() checks, permission check and then
after() checks."""
user = self._get_user()
        # run before checks and stop at the first non-null response
result = None
for callback in self.before_callbacks:
result = callback(user, permission)
if result:
break
# run permission checks if nothing returned previously
if result is None:
# first check in policy
permission_method = None
if len(args) > 0:
policy = self.get_policy_for(args[0])
if policy:
try:
permission_method = getattr(policy, permission)
except AttributeError:
raise PolicyDoesNotExist(
f"Policy method {permission} not found in {policy.__class__.__name__}."
)
if not permission_method:
# else check in gates
try:
permission_method = self.permissions[permission]
except KeyError:
raise GateDoesNotExist(
f"Gate {permission} has not been found. Did you declare it ?"
)
params = signature(permission_method).parameters
        # check if the user parameter is optional (meaning that guest users are allowed)
if (
permission_method.__defaults__
and permission_method.__defaults__[0] is None
and not user
):
result = True
elif not user:
result = False
elif len(params) == 1:
result = permission_method(user)
else:
result = permission_method(user, *args)
# run after checks
for callback in self.after_callbacks:
after_result = callback(user, permission, result)
result = after_result if after_result is not None else result
return result
def _get_user(self):
from ..facades import Request
if self.user_callback:
return self.user_callback()
else:
return Request.user()
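# Hedged usage sketch (illustrative only; the application object and the user
# dictionaries below are stand-ins, not part of this module):
#   gate = Gate(application=None, user_callback=lambda: {"id": 1, "is_admin": True})
#   gate.define("delete-post", lambda user: user.get("is_admin", False))
#   gate.allows("delete-post")                                         # True
#   gate.for_user({"id": 2, "is_admin": False}).denies("delete-post")  # True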
|
from formencode import Schema, validators, FancyValidator, Invalid, ForEach
from dateutil.parser import parse
class ValidateISODate(FancyValidator):
@staticmethod
def _to_python(value, state):
try:
val = parse(value)
except ValueError:
raise Invalid("Date/time format is invalid, it must be ISO 8601 formatted "
"for UTZ with no offset (i.e. 2010-01-01T01:01:01Z)", value, state)
return val
class PublicKeyValidator(Schema):
id = validators.String()
active = validators.Bool()
date_created = ValidateISODate()
date_expires = ValidateISODate()
public_key = validators.String()
allow_extra_fields = True
class DirectoryUserDeviceLinkResponseValidator(Schema):
qrcode = validators.String() # URL
code = validators.String(min=7)
allow_extra_fields = True
class DirectoryGetDeviceResponseValidator(Schema):
id = validators.String()
name = validators.String()
status = validators.Int()
type = validators.String()
allow_extra_fields = True
class DirectoryGetSessionsValidator(Schema):
auth_request = validators.String()
date_created = ValidateISODate()
service_icon = validators.String()
service_id = validators.String()
service_name = validators.String()
allow_extra_fields = True
class DirectoryValidator(Schema):
id = validators.String()
service_ids = ForEach(validators.String())
sdk_keys = ForEach(validators.String())
premium = validators.Bool()
name = validators.String()
android_key = validators.String()
ios_certificate_fingerprint = validators.String()
active = validators.Bool()
allow_extra_fields = True
class AuthorizationResponseValidator(Schema):
auth = validators.String()
service_user_hash = validators.String()
org_user_hash = validators.String()
user_push_id = validators.String()
public_key_id = validators.String()
allow_extra_fields = True
class AuthorizationResponsePackageValidator(Schema):
service_pins = ForEach()
auth_request = validators.String() # UUID
response = validators.Bool()
device_id = validators.String()
allow_extra_fields = True
class AuthorizeValidator(Schema):
auth_request = validators.String(not_empty=True)
push_package = validators.String(if_missing=None, not_empty=True)
allow_extra_fields = True
class AuthorizeSSEValidator(Schema):
service_user_hash = validators.String()
api_time = validators.String()
allow_extra_fields = True
class ServiceValidator(Schema):
id = validators.String()
icon = validators.String()
name = validators.String()
description = validators.String()
active = validators.Bool()
callback_url = validators.String()
allow_extra_fields = True
class ServiceSecurityPolicyValidator(Schema):
allow_extra_fields = True
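# Hedged usage sketch (field values are made up): formencode schemas are applied
# with to_python(), which raises Invalid when validation fails.
#   AuthorizeValidator().to_python({"auth_request": "req-123", "push_package": "opaque-blob"})
#   PublicKeyValidator().to_python({"id": "key-1", "active": True,
#                                   "date_created": "2010-01-01T01:01:01Z",
#                                   "date_expires": "2011-01-01T01:01:01Z",
#                                   "public_key": "-----BEGIN PUBLIC KEY-----..."})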
|
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.70
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailchimp_marketing.api_client import ApiClient
class TemplatesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client):
self.api_client = api_client
def delete_template(self, template_id, **kwargs): # noqa: E501
"""Delete template # noqa: E501
Delete a specific template. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_template(template_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str template_id: The unique id for the template. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_template_with_http_info(template_id, **kwargs) # noqa: E501
else:
(data) = self.delete_template_with_http_info(template_id, **kwargs) # noqa: E501
return data
def delete_template_with_http_info(self, template_id, **kwargs): # noqa: E501
"""Delete template # noqa: E501
Delete a specific template. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_template_with_http_info(template_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str template_id: The unique id for the template. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['template_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_template" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'template_id' is set
if ('template_id' not in params or
params['template_id'] is None):
raise ValueError("Missing the required parameter `template_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'template_id' in params:
path_params['template_id'] = params['template_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/templates/{template_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list(self, **kwargs): # noqa: E501
"""List templates # noqa: E501
Get a list of an account's available templates. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list(async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:param int count: The number of records to return. Default value is 10. Maximum value is 1000
        :param int offset: Used for [pagination](https://mailchimp.com/developer/marketing/docs/methods-parameters/#pagination), this is the number of records from a collection to skip. Default value is 0.
:param str created_by: The Mailchimp account user who created the template.
:param str since_date_created: Restrict the response to templates created after the set date. Uses ISO 8601 time format: 2015-10-21T15:41:36+00:00.
:param str before_date_created: Restrict the response to templates created before the set date. Uses ISO 8601 time format: 2015-10-21T15:41:36+00:00.
:param str type: Limit results based on template type.
:param str category: Limit results based on category.
:param str folder_id: The unique folder id.
:param str sort_field: Returns user templates sorted by the specified field.
:param str sort_dir: Determines the order direction for sorted results.
:return: Templates
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_with_http_info(**kwargs) # noqa: E501
return data
def list_with_http_info(self, **kwargs): # noqa: E501
"""List templates # noqa: E501
Get a list of an account's available templates. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:param int count: The number of records to return. Default value is 10. Maximum value is 1000
        :param int offset: Used for [pagination](https://mailchimp.com/developer/marketing/docs/methods-parameters/#pagination), this is the number of records from a collection to skip. Default value is 0.
:param str created_by: The Mailchimp account user who created the template.
:param str since_date_created: Restrict the response to templates created after the set date. Uses ISO 8601 time format: 2015-10-21T15:41:36+00:00.
:param str before_date_created: Restrict the response to templates created before the set date. Uses ISO 8601 time format: 2015-10-21T15:41:36+00:00.
:param str type: Limit results based on template type.
:param str category: Limit results based on category.
:param str folder_id: The unique folder id.
:param str sort_field: Returns user templates sorted by the specified field.
:param str sort_dir: Determines the order direction for sorted results.
:return: Templates
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['fields', 'exclude_fields', 'count', 'offset', 'created_by', 'since_date_created', 'before_date_created', 'type', 'category', 'folder_id', 'sort_field', 'sort_dir'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list" % key
)
params[key] = val
del params['kwargs']
if 'count' in params and params['count'] > 1000: # noqa: E501
raise ValueError("Invalid value for parameter `count` when calling ``, must be a value less than or equal to `1000`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'created_by' in params:
query_params.append(('created_by', params['created_by'])) # noqa: E501
if 'since_date_created' in params:
query_params.append(('since_date_created', params['since_date_created'])) # noqa: E501
if 'before_date_created' in params:
query_params.append(('before_date_created', params['before_date_created'])) # noqa: E501
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
if 'category' in params:
query_params.append(('category', params['category'])) # noqa: E501
if 'folder_id' in params:
query_params.append(('folder_id', params['folder_id'])) # noqa: E501
if 'sort_field' in params:
query_params.append(('sort_field', params['sort_field'])) # noqa: E501
if 'sort_dir' in params:
query_params.append(('sort_dir', params['sort_dir'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/templates', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Templates', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_template(self, template_id, **kwargs): # noqa: E501
"""Get template info # noqa: E501
Get information about a specific template. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_template(template_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str template_id: The unique id for the template. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:return: TemplateInstance
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_template_with_http_info(template_id, **kwargs) # noqa: E501
else:
(data) = self.get_template_with_http_info(template_id, **kwargs) # noqa: E501
return data
def get_template_with_http_info(self, template_id, **kwargs): # noqa: E501
"""Get template info # noqa: E501
Get information about a specific template. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_template_with_http_info(template_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str template_id: The unique id for the template. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:return: TemplateInstance
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['template_id', 'fields', 'exclude_fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_template" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'template_id' is set
if ('template_id' not in params or
params['template_id'] is None):
raise ValueError("Missing the required parameter `template_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'template_id' in params:
path_params['template_id'] = params['template_id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/templates/{template_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TemplateInstance', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_default_content_for_template(self, template_id, **kwargs): # noqa: E501
"""View default content # noqa: E501
Get the sections that you can edit in a template, including each section's default content. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_default_content_for_template(template_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str template_id: The unique id for the template. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:return: TemplateDefaultContent
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_default_content_for_template_with_http_info(template_id, **kwargs) # noqa: E501
else:
(data) = self.get_default_content_for_template_with_http_info(template_id, **kwargs) # noqa: E501
return data
def get_default_content_for_template_with_http_info(self, template_id, **kwargs): # noqa: E501
"""View default content # noqa: E501
Get the sections that you can edit in a template, including each section's default content. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_default_content_for_template_with_http_info(template_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str template_id: The unique id for the template. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:return: TemplateDefaultContent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['template_id', 'fields', 'exclude_fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_default_content_for_template" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'template_id' is set
if ('template_id' not in params or
params['template_id'] is None):
raise ValueError("Missing the required parameter `template_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'template_id' in params:
path_params['template_id'] = params['template_id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/templates/{template_id}/default-content', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TemplateDefaultContent', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_template(self, template_id, body, **kwargs): # noqa: E501
"""Update template # noqa: E501
Update the name, HTML, or `folder_id` of an existing template. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_template(template_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str template_id: The unique id for the template. (required)
:param TemplateInstance2 body: (required)
:return: TemplateInstance
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_template_with_http_info(template_id, body, **kwargs) # noqa: E501
else:
(data) = self.update_template_with_http_info(template_id, body, **kwargs) # noqa: E501
return data
def update_template_with_http_info(self, template_id, body, **kwargs): # noqa: E501
"""Update template # noqa: E501
Update the name, HTML, or `folder_id` of an existing template. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_template_with_http_info(template_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str template_id: The unique id for the template. (required)
:param TemplateInstance2 body: (required)
:return: TemplateInstance
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['template_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_template" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'template_id' is set
if ('template_id' not in params or
params['template_id'] is None):
raise ValueError("Missing the required parameter `template_id` when calling ``") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'template_id' in params:
path_params['template_id'] = params['template_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/templates/{template_id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TemplateInstance', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create(self, body, **kwargs): # noqa: E501
"""Add template # noqa: E501
Create a new template for the account. Only Classic templates are supported. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param TemplateInstance1 body: (required)
:return: TemplateInstance
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_with_http_info(body, **kwargs) # noqa: E501
return data
def create_with_http_info(self, body, **kwargs): # noqa: E501
"""Add template # noqa: E501
Create a new template for the account. Only Classic templates are supported. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param TemplateInstance1 body: (required)
:return: TemplateInstance
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/templates', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TemplateInstance', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
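# Hedged usage sketch (not part of the generated client). It assumes the standard
# mailchimp_marketing Client wrapper, which exposes this TemplatesApi as
# `client.templates`; the api key, server prefix and template id are placeholders.
#   import mailchimp_marketing as MailchimpMarketing
#   client = MailchimpMarketing.Client()
#   client.set_config({"api_key": "YOUR_API_KEY", "server": "YOUR_SERVER_PREFIX"})
#   templates = client.templates.list(count=10)
#   one_template = client.templates.get_template("2000001")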
|
"""
Question:
Write a program which takes 2 digits, X,Y as input and generates a 2-dimensional array. The element value in the i-th row and j-th column of the array should be i*j.
Note: i = 0, 1, ..., X-1; j = 0, 1, ..., Y-1.
Example
Suppose the following inputs are given to the program:
3,5
Then, the output of the program should be:
[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]
"""
str_input = input()
dimensions = [int(x) for x in str_input.split(',')]
rowNum = dimensions[0]
columnNum = dimensions[1]
multilist = [[0 for col in range(columnNum)] for row in range(rowNum)]
"""
This creates a 4X6 dimensional array of 0's
0 0 0 0 0 0
0 0 0 0 0 0
0 0 0 0 0 0
0 0 0 0 0 0
"""
print(multilist)
for row in range(rowNum):
for col in range(columnNum):
multilist[row][col]= row*col
print(multilist)
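# Equivalent construction with a nested list comprehension (illustrative
# alternative; it produces the same array as the loops above):
multilist_via_comprehension = [[row * col for col in range(columnNum)] for row in range(rowNum)]
assert multilist_via_comprehension == multilist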
"""
------------------Row 1----------------------
for row in range(0):
for col in range(6):
multilist[0][0]= 1*0 = 0
for row in range(0):
for col in range(6):
multilist[0][1]= 0*1 = 0
for row in range(0):
for col in range(6):
multilist[0][2]= 0*2 = 0
for row in range(0):
for col in range(6):
multilist[0][3]= 0*3 = 0
for row in range(0):
for col in range(6):
multilist[0][4]= 0*4 = 0
for row in range(0):
for col in range(6):
multilist[0][5]= 0*5 = 0
for row in range(0):
for col in range(6):
multilist[0][6]= 0*6 = 0
------------------Row 2-------------------------
for row in range(1):
for col in range(6):
multilist[1][0]= 1*0 = 0
for row in range(1):
for col in range(6):
multilist[1][1]= 1*1 = 1
for row in range(1):
for col in range(6):
multilist[1][2]= 1*2 = 2
for row in range(1):
for col in range(6):
multilist[1][3]= 1*3 = 3
for row in range(1):
for col in range(6):
multilist[1][4]= 1*4 = 4
for row in range(1):
for col in range(6):
multilist[1][5]= 1*5 = 5
for row in range(1):
for col in range(6):
multilist[1][6]= 1*6 = 6
------------------Row 3---------------------------
for row in range(2):
for col in range(6):
multilist[2][0]= 2*0 = 0
for row in range(2):
for col in range(6):
multilist[2][1]= 2*1 = 2
for row in range(2):
for col in range(6):
multilist[2][2]= 2*2 = 4
for row in range(2):
for col in range(6):
multilist[2][3]= 2*3 = 6
for row in range(2):
for col in range(6):
multilist[2][4]= 2*4 = 8
for row in range(2):
for col in range(6):
multilist[2][5]= 2*5 = 10
for row in range(2):
for col in range(6):
multilist[2][6]= 2*6 = 12
------------------Row 4---------------------------------
for row in range(3):
for col in range(6):
multilist[3][0]= 3*0 = 0
for row in range(3):
for col in range(6):
multilist[3][1]= 3*1 = 3
for row in range(3):
for col in range(6):
multilist[3][2]= 3*2 = 6
for row in range(3):
for col in range(6):
multilist[3][3]= 3*3 = 9
for row in range(3):
for col in range(6):
multilist[3][4]= 3*4 = 12
for row in range(3):
for col in range(6):
multilist[3][5]= 3*5 = 15
""" |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from baxter_maintenance_msgs/UpdateStatus.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class UpdateStatus(genpy.Message):
_md5sum = "74e246350421569590252c39e8aa7b85"
_type = "baxter_maintenance_msgs/UpdateStatus"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# See the class UpdateRunner()
# status: One-word description of the current action being performed
# long_description: Details pertaining to status if any. Used for verbose error messages.
uint16 status
float32 progress
string long_description
uint16 STS_IDLE = 0
uint16 STS_INVALID = 1
uint16 STS_BUSY = 2
uint16 STS_CANCELLED = 3
uint16 STS_ERR = 4
uint16 STS_MOUNT_UPDATE = 5
uint16 STS_VERIFY_UPDATE = 6
uint16 STS_PREP_STAGING = 7
uint16 STS_MOUNT_STAGING = 8
uint16 STS_EXTRACT_UPDATE = 9
uint16 STS_LOAD_KEXEC = 10
"""
# Pseudo-constants
STS_IDLE = 0
STS_INVALID = 1
STS_BUSY = 2
STS_CANCELLED = 3
STS_ERR = 4
STS_MOUNT_UPDATE = 5
STS_VERIFY_UPDATE = 6
STS_PREP_STAGING = 7
STS_MOUNT_STAGING = 8
STS_EXTRACT_UPDATE = 9
STS_LOAD_KEXEC = 10
__slots__ = ['status','progress','long_description']
_slot_types = ['uint16','float32','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
status,progress,long_description
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(UpdateStatus, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.status is None:
self.status = 0
if self.progress is None:
self.progress = 0.
if self.long_description is None:
self.long_description = ''
else:
self.status = 0
self.progress = 0.
self.long_description = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_Hf().pack(_x.status, _x.progress))
_x = self.long_description
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 6
(_x.status, _x.progress,) = _get_struct_Hf().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.long_description = str[start:end].decode('utf-8')
else:
self.long_description = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_Hf().pack(_x.status, _x.progress))
_x = self.long_description
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 6
(_x.status, _x.progress,) = _get_struct_Hf().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.long_description = str[start:end].decode('utf-8')
else:
self.long_description = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_Hf = None
def _get_struct_Hf():
global _struct_Hf
if _struct_Hf is None:
_struct_Hf = struct.Struct("<Hf")
return _struct_Hf
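# Hedged round-trip sketch (values are illustrative; not part of the generated file):
#   from io import BytesIO
#   msg = UpdateStatus(status=UpdateStatus.STS_BUSY, progress=0.5, long_description='updating')
#   buff = BytesIO()
#   msg.serialize(buff)
#   decoded = UpdateStatus().deserialize(buff.getvalue())
#   assert decoded.status == UpdateStatus.STS_BUSY and decoded.long_description == 'updating'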
|
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ansible_merge_vars',
version='5.0.0',
description='An Ansible action plugin to explicitly merge inventory variables', # noqa
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/leapfrogonline/ansible-merge-vars',
author='Leapfrog Online',
author_email='[email protected]',
classifiers=[ # Optional
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: System :: Systems Administration',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords='ansible plugin', # Optional
py_modules=["ansible_merge_vars"],
project_urls={ # Optional
'Bug Reports': 'https://github.com/leapfrogonline/ansible-merge-vars/issues',
'Source': 'https://github.com/leapfrogonline/ansible-merge-vars/',
},
)
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import paddle
from .utils.verification import CallBackVerification
from .utils.io import Checkpoint
from . import backbones
def validation(args):
checkpoint = Checkpoint(
rank=0,
world_size=1,
embedding_size=args.embedding_size,
num_classes=None,
checkpoint_dir=args.checkpoint_dir, )
backbone = eval("backbones.{}".format(args.backbone))(
num_features=args.embedding_size)
checkpoint.load(backbone, for_train=False, dtype='float32')
backbone.eval()
callback_verification = CallBackVerification(
1, 0, args.batch_size, args.val_targets, args.data_dir)
callback_verification(1, backbone)
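# Hedged usage sketch (attribute values are placeholders; the backbone name must
# match a class defined in the local `backbones` package):
#   from types import SimpleNamespace
#   args = SimpleNamespace(
#       backbone="<BackboneClassName>",
#       embedding_size=512,
#       checkpoint_dir="./checkpoints",
#       batch_size=128,
#       val_targets=["lfw"],
#       data_dir="./data",
#   )
#   validation(args)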
|
import json
class ArxivDocEncoder(json.JSONEncoder):
def default(self, o):
try:
to_serialize = {
"id": o.doc_id,
"url": o.url,
"title": o.title,
"abstract": o.abstract,
"authors": ",".join(o.authors),
"publish_date": o.publish_date,
"pdf": o.pdf_url,
}
return to_serialize
except AttributeError:
return super().default(o)
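# Hedged usage sketch (`arxiv_doc` stands for any object exposing the attributes
# read above, e.g. doc_id, url, title; it is not defined in this module):
#   payload = json.dumps(arxiv_doc, cls=ArxivDocEncoder)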
|
import os
import sys
import struct
import numpy
from time import time
from particles_container import ParticlesContainer
import multiprocessing as mp
import threading
from data_parsers import BackupDataParser
#from numba import jit,float64,int8,autojit
def rd(f, t, max_particles):
while True:
try:
print "Reading ",f
trsp = t.transport_for_file(f)
return BackupDataParser(trsp).raw_particles(max_particles)
except:
print "Bad thing while reading, trying once more"
class BackupDataReader:
def __init__(self, elemets_description = None, chunk_size = 100000):
self.elemets_description = elemets_description
self.chunk_size = chunk_size
if not self.elemets_description:
return
for elem in elemets_description:
try:
elemets_description[elem]["id"]
except:
raise "Elements description has to have \"id\" field"
def read(self, transport, max_particles = 100000, np=5):
print ( "Reading backup directory " + transport.address )
files = transport.list()
containers = {}
if self.elemets_description:
for element in self.elemets_description:
containers[element] = ParticlesContainer(element, self.elemets_description[element]["atom_d"], self.elemets_description[element]["atom_mass"])
else:
containers = ParticlesContainer("Unknown", 1, 1)
if self.elemets_description:
print ("Created {} containers".format(len(containers)))
particles_count = max_particles;
pool = mp.Pool(processes=np)
results = [pool.apply_async(rd, args=(f, transport, max_particles)) for f in files]
print "Created ",np," readers"
ts = time();
for raw_particles in results:
raw_particles = raw_particles.get()
n = raw_particles["n"]
t = raw_particles["t"]
x = raw_particles["x"]
y = raw_particles["y"]
z = raw_particles["z"]
vx = raw_particles["vx"]
vy = raw_particles["vy"]
vz = raw_particles["vz"]
particles_count = raw_particles["count"]
tts = time()
if self.elemets_description:
for element in self.elemets_description:
# print "Filtering by type ", element
containers[element].add_particles(n[t == self.elemets_description[element]["id"]],
x[t == self.elemets_description[element]["id"]],
y[t == self.elemets_description[element]["id"]],
z[t == self.elemets_description[element]["id"]],
vx[t == self.elemets_description[element]["id"]],
vy[t == self.elemets_description[element]["id"]],
vz[t == self.elemets_description[element]["id"]])
else:
# print "Finalizing"
containers.add_particles(n, x, y, z, vx, vy, vz)
if self.elemets_description:
for element in self.elemets_description:
print "Finalizing ", element
containers[element].finalize()
print ("Readed [{}] {} particles".format(len(containers[element].n),element))
else:
print "Finalizing "
containers.finalize()
print ("Readed [{}] particles".format(len(containers.n)))
print "Readed directory, total read time:", time()-ts
return containers
|
from django.conf.urls import url
from django.conf import settings
from django.urls import include, path
from rest_framework import routers
from core import views
class OptionalTrailingSlashRouter(routers.SimpleRouter):
def __init__(self):
super().__init__()
self.trailing_slash = '/?'
router = OptionalTrailingSlashRouter()
router.register('museums', views.MuseumAPI, base_name='museum')
router.register('quiz', views.QuizAPI, base_name='quiz')
app_name = 'core'
urlpatterns = (
path('profile/', views.profile),
path('leaderboard/', views.leaderboard),
path('login', views.login),
path('privacy/', views.privacy),
path('', include(router.urls)),
)
if settings.DEBUG is True:
urlpatterns += (url(r'.debug/login', views.login_debug),)
|
from flask import request
from idManager.model import authentication_service, token_service, account_service, message_service
from idManager.view import authentication_view, header_view
from . import id_manager_blueprint
from flask_cors import cross_origin
@id_manager_blueprint.route('/auth/', methods=['POST'])
@header_view.verify_content_type
@header_view.add_response_headers
@cross_origin()
def auth_login():
ver = request.headers.get('ver')
data = request.get_json(force=True, silent=True)
if not data:
# Bad Request
message_service.expected_json_data()
# Use 'or ver is None' at the last version
if ver == '1' or not ver:
# Validate Schema using the write version.
account_data, errors = account_service.register_account_schema.load(data)
if errors:
# Bad Request
message_service.wrong_json_data(errors)
auth = authentication_service.auth_login_ver_1(account_data["email"], account_data["password"])
response = authentication_view.auth_login(**auth)
response.status_code = auth.get('http_status_code')
return response
# elif header['ver'] == '2':
# return auth_login_ver_2(username, password, ip)
else:
# Bad Request
message_service.invalid_api_ver()
@id_manager_blueprint.route('/auth/', methods=['GET'])
@header_view.verify_content_type
@token_service.validate_token
@header_view.add_response_headers
@cross_origin()
def auth_is_valid():
ver = request.headers.get('ver')
token = request.headers.get('token')
# Use 'or ver is None' at the last version
if ver == '1' or not ver:
auth = authentication_service.auth_is_valid_ver_1(token)
response = authentication_view.auth_is_valid(**auth)
response.status_code = auth.get('http_status_code')
return response
# elif header['ver'] == '2':
# return auth_logout_ver_2()
else:
# Bad Request
message_service.invalid_api_ver()
@id_manager_blueprint.route('/auth/', methods=['DELETE'])
@header_view.verify_content_type
@token_service.validate_token
@header_view.add_response_headers
@cross_origin()
def auth_logout():
ver = request.headers.get('ver')
token = request.headers.get('token')
# Use 'or ver is None' at the last version
if ver == '1' or not ver:
auth = authentication_service.auth_logout_ver_1(token)
response = authentication_view.auth_logout(**auth)
response.status_code = auth.get('http_status_code')
return response
# elif header['ver'] == '2':
# return auth_is_valid_ver_2()
else:
# Bad Request
message_service.invalid_api_ver()
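# Illustrative request flow for the three routes above (the 'ver' and 'token'
# header names and the email/password fields come from this module; everything
# else, such as the exact response payload, is only an assumption):
#   POST   /auth/  with JSON {"email": ..., "password": ...}  -> login, presumably returns a token
#   GET    /auth/  with a 'token' header                      -> checks whether the token is still valid
#   DELETE /auth/  with a 'token' header                      -> logout / token invalidation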
|
"""
This module stores all the information and parameters that are used to
configure the modules, sometimes called groups, of my qtile configuration.
A group is a workspace that can hold N windows; in my configuration I
have enabled 9 groups.
For more information check out: github.com/pablocorbalann/dotfiles/tree/main/qtile
"""
from libqtile.config import Key, Group
from libqtile.command import lazy
from settings.keys import mod, keys
# To use these icons you will have to install a nerd font (any of them). Check
# out: nerd-fonts.com, and then enable it (in Arch you have to put the .ttf, .otf or
# the font itself in the /usr/local/share/fonts/ dir; for more information about how
# to enable the font check: wiki.archlinux.org/index.php/Fonts). Icons:
#
# Below you can see the icon of each group and its nerd font name:
#
# nerdfonts.com/cheat-sheet
groups = [Group(f" {icon} ") for icon in [
"", # Icon for the web browser: nf-fa-firefox
"", # Icon of Python: nf-fae-python
"", # Icon of the terminal: nf-dev-terminal
"", # Icon of the html lang: nf-fa-code
"", # Icon of the settings: nf-seti-config
"", # Icon of the file manager: nf-mdi-folder
"", # Icon of the image viewer: nf-mdi-image
"", # Icon of the video player: nf-fa-video_camera
"", # Icon of the layers: nf-mdi-layers
]]
for i, group in enumerate(groups):
actual_key = str(i + 1)
keys.extend([
# Switch to workspace N
Key([mod], actual_key, lazy.group[group.name].toscreen()),
# Send window to workspace N
Key([mod, "shift"], actual_key, lazy.window.togroup(group.name))
])
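# With the loop above, for example, mod+1 switches to the first group and
# mod+shift+1 sends the focused window there; the same pattern applies up to
# mod+9 for the nine groups defined here.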
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 10:16:18 2018
@author: KrRish
"""
import os
print("Path at terminal when executing this file")
print(os.getcwd() + "\n")
os.chdir("E:\python")
f=open("file.txt","r")
print(f.readline(5))
print(f.read())
print(f.readlines())
#**********************************************
f=open("file.txt","r")
with open("file.txt",'r') as f:
d=f.readlines()
for var in d:
print(var)
#***********************************************
f=open("file.txt","r+")
|
from abc import ABCMeta, abstractmethod
from typing import Sequence
from .tech.timestamp import time_from_timestamp
from .meta import BeadName, InputSpec
class Bead:
'''
Interface to metadata of a bead.
Unique identifier:
box_name, name, content_id
content_id guarantees same data content, but beads with same content can have
different metadata, including where it is to be found (box_name) and under which name,
or how to find the referenced input beads (see input_map).
'''
# high level view of computation
kind: str
# kind is deprecated. Humans naturally agree on domain specific names instead.
    # The price is living with bad, undescriptive names that are hard to improve upon later.
name: BeadName
inputs: Sequence[InputSpec]
# frozen beads only details
# (workspaces fake them with recognisable values)
content_id: str
freeze_time_str: str
box_name: str
@property
def freeze_time(self):
return time_from_timestamp(self.freeze_time_str)
def get_input(self, name):
for input in self.inputs:
if name == input.name:
return input
class UnpackableBead(Bead, metaclass=ABCMeta):
'''
Provide high-level access to content of a bead.
'''
def unpack_to(self, workspace):
self.unpack_code_to(workspace.directory)
workspace.create_directories()
self.unpack_meta_to(workspace)
@abstractmethod
def unpack_data_to(self, path):
pass
@abstractmethod
def unpack_code_to(self, path):
pass
@abstractmethod
def unpack_meta_to(self, workspace):
pass
|
# BMI (IMC) calculation.
x = "s"
while x == "s":
    sx = input("Digite seu sexo ")
    n = input("Digite seu nome ")
    p = float(input("Digite peso "))
    a = float(input("Digite altura "))
    imc = p / (a * a)
    print(imc)
    if sx == "Masculino":
        if imc < 20.7:
            print("abaixo do peso")
        elif imc <= 26.4:
            print("Peso ideal")
        elif imc <= 27.8:
            print("Marginalmente acima do peso")
        else:
            print("Acima do peso ideal")
    if sx == "Feminino":
        if imc < 19.1:
            print("abaixo do peso")
        elif imc <= 25.8:
            print("Peso ideal")
        elif imc <= 27.3:
            print("Marginalmente acima do peso")
        else:
            print("Acima do peso ideal")
x=input("Quer ver de outra pessoa? [s/n]:")
|
from jose.backends.base import Key
from jose.constants import ALGORITHMS
from jose.exceptions import JWKError
try:
from jose.backends import RSAKey # noqa: F401
except ImportError:
pass
try:
from jose.backends import ECKey # noqa: F401
except ImportError:
pass
try:
from jose.backends import AESKey # noqa: F401
except ImportError:
pass
try:
from jose.backends import DIRKey # noqa: F401
except ImportError:
pass
try:
from jose.backends import HMACKey # noqa: F401
except ImportError:
pass
def get_key(algorithm):
if algorithm in ALGORITHMS.KEYS:
return ALGORITHMS.KEYS[algorithm]
elif algorithm in ALGORITHMS.HMAC: # noqa: F811
return HMACKey
elif algorithm in ALGORITHMS.RSA:
from jose.backends import RSAKey # noqa: F811
return RSAKey
elif algorithm in ALGORITHMS.EC:
from jose.backends import ECKey # noqa: F811
return ECKey
elif algorithm in ALGORITHMS.AES:
from jose.backends import AESKey # noqa: F811
return AESKey
elif algorithm == ALGORITHMS.DIR:
from jose.backends import DIRKey # noqa: F811
return DIRKey
return None
def register_key(algorithm, key_class):
if not issubclass(key_class, Key):
raise TypeError("Key class is not a subclass of jwk.Key")
ALGORITHMS.KEYS[algorithm] = key_class
ALGORITHMS.SUPPORTED.add(algorithm)
return True
def construct(key_data, algorithm=None):
"""
Construct a Key object for the given algorithm with the given
key_data.
"""
# Allow for pulling the algorithm off of the passed in jwk.
if not algorithm and isinstance(key_data, dict):
algorithm = key_data.get("alg", None)
if not algorithm:
raise JWKError("Unable to find an algorithm for key: %s" % key_data)
key_class = get_key(algorithm)
if not key_class:
raise JWKError("Unable to find an algorithm for key: %s" % key_data)
return key_class(key_data, algorithm)
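# Minimal usage sketch (the key material below is a placeholder, not a real
# secret, and it assumes the HMAC backend imported above is available):
#   key = construct({"kty": "oct", "k": "c2VjcmV0", "alg": "HS256"})
#   # 'key' is then an instance of the class returned by get_key("HS256")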
|
"""Example of framework usage"""
import random
from acai_ml.core import Engine
import pandas as pd
from pydataset import data
from pathlib import Path
from dbkcore.core import trace
from dbkcore.core import Log
from dbkdev.data_steps import DataStep, DataStepDataframe
from dbkdev.data_steps import apply_test
from sklearn.model_selection import ParameterSampler
from sklearn.utils.fixes import loguniform
from pyspark.sql import functions as F
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import svm
class Step_loadData(DataStep):
"""Load the defined dataset."""
def test(self):
"""Apply data tests."""
self.test_is_dataframe_empty(df=self.output_data.dataframe)
self.test_null_values(
cols=['Sepal.Length', 'Sepal.Width'],
df=self.output_data.dataframe
)
@apply_test
@trace
def initialize(self, name_dataset: str):
"""
Initialize the DataStep.
Parameters
----------
name_dataset : str
Name of the dataset to load from pydataset package
"""
p_df = data(name_dataset)
p_df.columns = [c.replace('.', '') for c in p_df.columns]
dt = self.spark.createDataFrame(p_df)
self.set_output_data(dt)
class Step_crossValidate(DataStep):
"""Run multiple models in parallel."""
def test(self):
pass
@trace(attrs_refact=['appi_ik'])
def initialize(
self,
dt: DataStepDataframe,
pipeline_name: str,
appi_ik: str,
n_iter: int
):
param_grid = {
'C': loguniform(1e0, 1e3),
'kernel': ['linear', 'rbf'],
'class_weight': ['balanced', None]
}
rng = np.random.RandomState(0)
param_list = list(
ParameterSampler(
param_grid,
n_iter=n_iter,
random_state=rng
)
)
# p_dt = Engine.get_instance().spark().createDataFrame(pd.DataFrame(param_list)).\
# withColumn('id', F.monotonically_increasing_id())
p_dt = self.spark.createDataFrame(pd.DataFrame(param_list)).\
withColumn('id', F.monotonically_increasing_id())
dt_train = dt.dataframe.crossJoin(
p_dt
)
udf_schema = dt_train.select(
'id',
F.lit(0.0).alias('score')
).schema
def pudf_train(dt_model):
param_id = dt_model['id'].unique()[0]
param_c = dt_model['C'].unique()[0]
param_class_weight = dt_model['class_weight'].unique()[0]
param_kernel = dt_model['kernel'].unique()[0]
logging_custom_dimensions = {
'id': str(param_id),
'C': str(param_c),
'class_weight': param_class_weight,
'kernel': param_kernel
}
Log(pipeline_name, appi_ik)
try:
                # Randomly raise an exception
                if random.randint(0, 20) > 15:
                    raise Exception('Random exception')
dt_x = dt_model[
[
'SepalLength',
'SepalWidth',
'PetalLength',
'PetalWidth'
]
]
y = dt_model['Species']
clf = svm.SVC(
kernel=param_kernel,
C=param_c,
class_weight=param_class_weight,
random_state=42
)
scores = cross_val_score(clf, dt_x, y, cv=5, scoring='f1_macro')
score = scores.mean()
dt_out = pd.DataFrame(
{
'id': [param_id],
'score': [score]
}
)
Log.get_instance().log_info("Training:success", custom_dimension=logging_custom_dimensions)
except Exception:
Log.get_instance().log_error("Training:failed", custom_dimension=logging_custom_dimensions)
dt_out = pd.DataFrame(
{
'id': [param_id],
'score': [-1]
}
)
return dt_out
'''
dt_model = dt_train.where(F.col('id') == 17179869184).toPandas()
'''
dt_cross_evals = dt_train.\
groupBy(['id']).\
applyInPandas(pudf_train, schema=udf_schema).\
cache()
dt_cross_evals.count()
self.set_output_data(dt_cross_evals)
Engine()
Engine().get_instance().initialize_env()
# pipeline_name = Path(__file__).stem
pipeline_name = "Remote Testing"
Engine().get_instance().initialize_logger(pipeline_name=pipeline_name)
# Engine().get_instance().spark().conf.set("spark.sql.execution.arrow.enabled", "true")
run_id = 'test_run_id'
step_loadData = Step_loadData(
spark=Engine.get_instance().spark(),
run_id=run_id
)
step_loadData.initialize(
name_dataset='iris'
)
step_crossValidate = Step_crossValidate(
spark=Engine.get_instance().spark(),
run_id=run_id
)
step_crossValidate.initialize(
dt=step_loadData.output_data,
pipeline_name=pipeline_name,
appi_ik=Engine().get_instance().appi_ik,
n_iter=1000
)
step_crossValidate.output_data.dataframe.toPandas()
|
def parse_opcode(token):
"""Extract the opcode and the mode of all params"""
opcode = token % 100
modes = [0, 0, 0, 0]
if token > 100:
for (i, mode) in enumerate(str(token)[-3::-1]):
modes[i] = int(mode)
return opcode, modes
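# Illustrative values (added for clarity, not taken from any puzzle input):
#   parse_opcode(1002) -> (2, [0, 1, 0, 0])   # multiply, second param in immediate mode
#   parse_opcode(99)   -> (99, [0, 0, 0, 0])  # halt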
def param_value(memory, position, mode):
"""Get the value of a param according to its mode"""
if mode == 0: # position mode
return memory[memory[position]]
elif mode == 1: # immediate mode
return memory[position]
else:
raise ValueError("Unknown mode : ", mode)
def run_program(memory, inputs):
"""Execute the successive instructions of the program"""
instr_ptr = 0
output = []
while True:
(opcode, modes) = parse_opcode(memory[instr_ptr])
if opcode == 99: # Program end
break
elif opcode in [1, 2]: # 1 = Addition, 2 = Multiplication
param1 = param_value(memory, instr_ptr + 1, modes[0])
param2 = param_value(memory, instr_ptr + 2, modes[1])
output_address = memory[instr_ptr + 3]
memory[output_address] = (param1 + param2) if opcode == 1 else (param1 * param2)
# move forward the instruction pointer
instr_ptr += 4
elif opcode == 3: # Store input in memory (program init)
val_to_insert = inputs.pop(0)
output_address = memory[instr_ptr + 1]
memory[output_address] = val_to_insert
instr_ptr += 2
elif opcode == 4: # Output a value
val_to_output = param_value(memory, instr_ptr + 1, modes[0])
output.append(val_to_output)
instr_ptr += 2
else:
raise ValueError("Invalid opcode : ", opcode)
return output
def solve(memory):
"""Return the last value of the output"""
return run_program(memory, [1])[-1]
def parse(file_name):
"""Parse the data file into a list of int"""
with open(file_name, "r") as f:
return [int(x) for x in f.readline().split(",")]
if __name__ == '__main__':
print(solve(parse("data.txt")))
|
#
#
# Copyright (c) 2008, 2009 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#
# $Id$
import unittest
import sys
import ctypes
try:
import pyglet
from pyglet.gl import *
except ImportError:
import warnings
warnings.warn("Pyglet not installed, some texturizer tests disabled")
pyglet = None
class TexTestBase:
def assertVector(self, vec3, exp, tolerance=0.0001):
x, y, z = exp
self.failUnless(abs(vec3.x - x) <= tolerance, (vec3, (x, y, z)))
self.failUnless(abs(vec3.y - y) <= tolerance, (vec3, (x, y, z)))
self.failUnless(abs(vec3.z - z) <= tolerance, (vec3, (x, y, z)))
def _make_group(self, pcount):
from lepton import ParticleGroup
group = ParticleGroup()
self._add_particles(group, pcount)
self.assertEqual(len(group), pcount)
return group
def _add_particles(self, group, pcount):
from lepton import Particle
for i in range(pcount):
group.new(Particle())
group.update(0)
class SpriteTexturizerTest(TexTestBase, unittest.TestCase):
def test_default_coords(self):
from lepton.texturizer import SpriteTexturizer
tex = SpriteTexturizer(0)
self.assertEqual(tex.tex_dimension, 2)
expected = (0, 0, 1, 0, 1, 1, 0, 1)
self.assertEqual(tex.tex_coords, None)
self.assertEqual(tex.weights, None)
group = self._make_group(4)
coords = tex.generate_tex_coords(group)
self.failUnless(len(coords) >= len(group) * 8, (len(coords), len(group)))
self.assertEqual(tuple(coords), expected * (len(coords) // 8))
return tex, group
def test_default_coords_growing_group(self):
tex, group = self.test_default_coords()
self._add_particles(group, 200)
expected = (0, 0, 1, 0, 1, 1, 0, 1)
coords = tex.generate_tex_coords(group)
self.failUnless(len(coords) >= len(group) * 8, (len(coords), len(group)))
self.assertEqual(tuple(coords), expected * (len(coords) // 8))
def test_single_coord_set(self):
from lepton.texturizer import SpriteTexturizer
coord_set = (0, 0, 0.5, 0, 0.5, 0.5, 0, 0.5)
tex = SpriteTexturizer(0, coords=[coord_set])
self.assertEqual(tex.tex_dimension, 2)
self.assertEqual(tex.tex_coords, (coord_set,))
self.assertEqual(tex.weights, None)
group = self._make_group(4)
coords = tex.generate_tex_coords(group)
self.failUnless(len(coords) >= len(group) * 8, (len(coords), len(group)))
self.assertEqual(tuple(coords), coord_set * (len(coords) // 8))
return coord_set, tex, group
def test_single_coord_set_growing_group(self):
coord_set, tex, group = self.test_single_coord_set()
self._add_particles(group, 200)
expected = (0, 0, 1, 0, 1, 1, 0, 1)
coords = tex.generate_tex_coords(group)
self.failUnless(len(coords) >= len(group) * 8, (len(coords), len(group)))
self.assertEqual(tuple(coords), coord_set * (len(coords) // 8))
    def test_multiple_coord_sets(self):
from lepton.texturizer import SpriteTexturizer
coord_set1 = (0.5, 0.5, 1, 0.5, 1, 1, 0.5, 1)
coord_set2 = ((0, 0.5), (0.5, 0.5), (0.5, 1), (0, 1))
coord_set3 = (0.5, 0, 0, 1, 0, 0, 1, 0.5, 0, 0.5, 0.5, 0)
tex = SpriteTexturizer(0, coords=[coord_set1, coord_set2, coord_set3])
coord_sets = tex.tex_coords
self.assertEqual(coord_sets, (
(0.5, 0.5, 1, 0.5, 1, 1, 0.5, 1),
(0, 0.5, 0.5, 0.5, 0.5, 1, 0, 1),
(0.5, 0, 1, 0, 1, 0.5, 0.5, 0.5))
)
self.assertEqual(tex.weights, None)
group = self._make_group(6)
coords = tuple(tex.generate_tex_coords(group))
self.failUnless(len(coords) >= len(group) * 8, (len(coords), len(group)))
self.assertEqual(coords[:8], coord_sets[0])
self.assertEqual(coords[8:16], coord_sets[1])
self.assertEqual(coords[16:24], coord_sets[2])
self.assertEqual(coords[24:32], coord_sets[0])
self.assertEqual(coords[32:40], coord_sets[1])
self.assertEqual(coords[40:48], coord_sets[2])
def test_coord_set_weights(self):
from lepton.texturizer import SpriteTexturizer
coord_set1 = ((0.5, 0.5), (1, 0.5), (1, 1), (0.5, 1))
coord_set2 = (0, 0.5, 0.5, 0.5, 0.5, 1, 0, 1)
coord_set3 = (0.5, 0, 1, 0, 1, 0.5, 0.5, 0.5)
tex = SpriteTexturizer(0,
coords=(coord_set1, coord_set2, coord_set3), weights=(20, 30, 50))
coord_sets = tex.tex_coords
self.assertEqual(coord_sets, (
(0.5, 0.5, 1, 0.5, 1, 1, 0.5, 1),
(0, 0.5, 0.5, 0.5, 0.5, 1, 0, 1),
(0.5, 0, 1, 0, 1, 0.5, 0.5, 0.5))
)
self.assertEqual(len(tex.weights), 3)
self.assertAlmostEqual(tex.weights[0], 0.20)
self.assertAlmostEqual(tex.weights[1], 0.30)
self.assertAlmostEqual(tex.weights[2], 0.50)
group = self._make_group(1000)
coords = tuple(tex.generate_tex_coords(group))
self.failUnless(len(coords) >= 8000, (len(coords), len(group)))
counts = {coord_sets[0]: 0, coord_sets[1]: 0, coord_sets[2]: 0}
for i in range(1000):
cset = coords[i * 8:i * 8 + 8]
self.failUnless(cset in counts, cset)
counts[cset] += 1
self.assertEqual(sum(counts.values()), 1000)
self.failUnless(250 > counts[coord_sets[0]] > 150, counts[coord_sets[0]])
self.failUnless(375 > counts[coord_sets[1]] > 225, counts[coord_sets[1]])
self.failUnless(600 > counts[coord_sets[2]] > 400, counts[coord_sets[2]])
def test_coord_set_weights_deterministic(self):
from lepton.texturizer import SpriteTexturizer
coord_set1 = ((0.5, 0.5), (1, 0.5), (1, 1), (0.5, 1))
coord_set2 = (0, 0.5, 0.5, 0.5, 0.5, 1, 0, 1)
coord_set3 = (0.5, 0, 1, 0, 1, 0.5, 0.5, 0.5)
tex = SpriteTexturizer(0,
coords=(coord_set1, coord_set2, coord_set3), weights=(20, 70, 10))
coord_sets = tex.tex_coords
group = self._make_group(20)
coords = [tuple(tex.generate_tex_coords(group)) for i in range(20)]
for cs in coords:
self.assertEqual(cs, coords[0])
def test_aspect_adjust(self):
from lepton.texturizer import SpriteTexturizer
coord_set1 = (0, 0, 1, 0, 1, 0.5, 0, 0.5)
coord_set2 = (0, 0.5, 0.5, 0.5, 0.5, 1, 0, 1)
tex = SpriteTexturizer(0, coords=(coord_set1, coord_set2))
self.failIf(tex.aspect_adjust_width)
self.failIf(tex.aspect_adjust_height)
sizes = [
(1, 1, 0),
(2, 3, 0),
]
group = self._make_group(2)
for size, p in zip(sizes, group):
p.size = size
self.assertEqual([tuple(p.size) for p in group], sizes)
tex.generate_tex_coords(group)
self.assertEqual([tuple(p.size) for p in group], sizes)
tex.aspect_adjust_width = True
expected = [
(2, 1, 0),
(3, 3, 0),
]
tex.generate_tex_coords(group)
for p, b in zip(group, expected):
self.assertVector(p.size, b)
for size, p in zip(sizes, group):
p.size = size
self.assertEqual([tuple(p.size) for p in group], sizes)
tex.aspect_adjust_width = False
tex.aspect_adjust_height = True
expected = [
(1, 0.5, 0),
(2, 2, 0),
]
tex.generate_tex_coords(group)
for p, b in zip(group, expected):
self.assertVector(p.size, b)
def test_invalid_args(self):
from lepton.texturizer import SpriteTexturizer
self.assertRaises(TypeError, SpriteTexturizer, 0, object())
self.assertRaises(TypeError, SpriteTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0)], object())
self.assertRaises(ValueError, SpriteTexturizer, 0, [])
self.assertRaises(ValueError, SpriteTexturizer, 0, [(0, 0)])
self.assertRaises(ValueError, SpriteTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0)], [])
self.assertRaises(ValueError, SpriteTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0)], [-1])
self.assertRaises(ValueError, SpriteTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0)], [1, 1])
self.assertRaises(ValueError,
SpriteTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0)], [1, -1])
if pyglet is not None:
def _glGet(self, what):
result = (ctypes.c_int * 1)()
glGetIntegerv(what, result)
return result[0]
def test_set_state_restore_state(self):
from lepton.texturizer import SpriteTexturizer
texture = (ctypes.c_uint * 1)()
glGenTextures(1, texture)
glDisable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, 0)
sprite_tex = SpriteTexturizer(texture[0])
self.failIf(self._glGet(GL_TEXTURE_2D))
self.assertEqual(self._glGet(GL_TEXTURE_BINDING_2D), 0)
sprite_tex.set_state()
self.failUnless(self._glGet(GL_TEXTURE_2D))
self.assertEqual(self._glGet(GL_TEXTURE_BINDING_2D), texture[0])
sprite_tex.restore_state()
self.failIf(self._glGet(GL_TEXTURE_2D))
class FlipBookTexturizerTest(TexTestBase, unittest.TestCase):
def test_2D_single_duration_loop(self):
from lepton.texturizer import FlipBookTexturizer
coord_sets = [
(0, 0, 0.5, 0, 0.5, 0.5, 0, 0.5),
(0.5, 0, 1, 0, 1, 0.5, 0.5, 0.5),
(0, 0.5, 0.5, 0.5, 0.5, 1, 0, 1),
(0.5, 0.5, 1, 0.5, 1, 1, 0.5, 1),
]
fbtex = FlipBookTexturizer(0,
coords=coord_sets,
duration=0.1,
)
self.failUnless(fbtex.loop)
self.assertAlmostEqual(fbtex.duration, 0.1)
self.assertEqual(fbtex.tex_dimension, 2)
group = self._make_group(10)
age = 0.0
for p in group:
p.age = age
age += 0.06
coords = tuple(fbtex.generate_tex_coords(group))
self.failUnless(len(coords) >= len(group) * 8, (len(coords), len(group)))
self.assertEqual(coords[:8], coord_sets[0])
self.assertEqual(coords[8:16], coord_sets[0])
self.assertEqual(coords[16:24], coord_sets[1])
self.assertEqual(coords[24:32], coord_sets[1])
self.assertEqual(coords[32:40], coord_sets[2])
self.assertEqual(coords[40:48], coord_sets[3])
self.assertEqual(coords[48:56], coord_sets[3])
self.assertEqual(coords[56:64], coord_sets[0])
self.assertEqual(coords[64:72], coord_sets[0])
self.assertEqual(coords[72:80], coord_sets[1])
# Next frame
group.update(0.05)
coords = tuple(fbtex.generate_tex_coords(group))
self.assertEqual(coords[:8], coord_sets[0])
self.assertEqual(coords[8:16], coord_sets[1])
self.assertEqual(coords[16:24], coord_sets[1])
self.assertEqual(coords[24:32], coord_sets[2])
self.assertEqual(coords[32:40], coord_sets[2])
self.assertEqual(coords[40:48], coord_sets[3])
self.assertEqual(coords[48:56], coord_sets[0])
self.assertEqual(coords[56:64], coord_sets[0])
self.assertEqual(coords[64:72], coord_sets[1])
self.assertEqual(coords[72:80], coord_sets[1])
def test_2D_single_duration_no_loop(self):
from lepton.texturizer import FlipBookTexturizer
coord_sets = [
(0, 0, 0.5, 0, 0.5, 0.5, 0, 0.5),
(0.5, 0, 1, 0, 1, 0.5, 0.5, 0.5),
(0, 0.5, 0.5, 0.5, 0.5, 1, 0, 1),
(0.5, 0.5, 1, 0.5, 1, 1, 0.5, 1),
]
fbtex = FlipBookTexturizer(0,
coords=coord_sets,
duration=0.03,
loop=False,
)
self.failIf(fbtex.loop)
self.assertAlmostEqual(fbtex.duration, 0.03)
group = self._make_group(10)
age = 0.0
for i, p in enumerate(group):
p.age = i * 0.016
coords = tuple(fbtex.generate_tex_coords(group))
self.failUnless(len(coords) >= len(group) * 8, (len(coords), len(group)))
self.assertEqual(coords[:8], coord_sets[0])
self.assertEqual(coords[8:16], coord_sets[0])
self.assertEqual(coords[16:24], coord_sets[1])
self.assertEqual(coords[24:32], coord_sets[1])
self.assertEqual(coords[32:40], coord_sets[2])
self.assertEqual(coords[40:48], coord_sets[2])
self.assertEqual(coords[48:56], coord_sets[3])
self.assertEqual(coords[56:64], coord_sets[3])
self.assertEqual(coords[64:72], coord_sets[3])
self.assertEqual(coords[72:80], coord_sets[3])
# Next frame
group.update(0.02)
coords = tuple(fbtex.generate_tex_coords(group))
self.assertEqual(coords[:8], coord_sets[0])
self.assertEqual(coords[8:16], coord_sets[1])
self.assertEqual(coords[16:24], coord_sets[1])
self.assertEqual(coords[24:32], coord_sets[2])
self.assertEqual(coords[32:40], coord_sets[2])
self.assertEqual(coords[40:48], coord_sets[3])
self.assertEqual(coords[48:56], coord_sets[3])
self.assertEqual(coords[56:64], coord_sets[3])
self.assertEqual(coords[64:72], coord_sets[3])
self.assertEqual(coords[72:80], coord_sets[3])
def test_2D_duration_list_loop(self):
from lepton.texturizer import FlipBookTexturizer
coord_sets = [
(0, 0, 0.5, 0, 0.5, 0.5, 0, 0.5),
(0.5, 0, 1, 0, 1, 0.5, 0.5, 0.5),
(0, 0.5, 0.5, 0.5, 0.5, 1, 0, 1),
(0.5, 0.5, 1, 0.5, 1, 1, 0.5, 1),
]
durations = (0.12, 0.3, 0.2, 0.15)
times = []
t = 0
for d in durations:
t += d
times.append(t)
fbtex = FlipBookTexturizer(0,
coords=coord_sets,
duration=durations,
)
self.failUnless(fbtex.loop)
for d, expected in zip(fbtex.duration, durations):
self.assertAlmostEqual(d, expected)
group = self._make_group(10)
age = 0.0
for p in group:
p.age = age % 2.0
age += 0.7
for f in range(5):
coords = tuple(fbtex.generate_tex_coords(group))
self.failUnless(len(coords) >= len(group) * 8, (len(coords), len(group)))
i = 0
for p, t in zip(group, times):
age = p.age % times[-1]
c = 0
while c < 3 and age > times[c]:
c += 1
self.assertEqual(coords[i:i + 8], coord_sets[c], "f=%s i=%s c=%s age=%s: %s != %s" %
(f, i, c, p.age, coords[i:i + 8], coord_sets[c]))
i += 8
group.update(0.2)
def test_2D_duration_list_no_loop(self):
from lepton.texturizer import FlipBookTexturizer
coord_sets = [
(0, 0, 0.5, 0, 0.5, 0.5, 0, 0.5),
(0.5, 0, 1, 0, 1, 0.5, 0.5, 0.5),
(0, 0.5, 0.5, 0.5, 0.5, 1, 0, 1),
(0.5, 0.5, 1, 0.5, 1, 1, 0.5, 1),
]
durations = (0.5, 0.25, 0.3, 0.4)
times = []
t = 0
for d in durations:
t += d
times.append(t)
fbtex = FlipBookTexturizer(0,
coords=coord_sets,
duration=durations,
loop=False,
)
self.failIf(fbtex.loop)
for d, expected in zip(fbtex.duration, durations):
self.assertAlmostEqual(d, expected, 6)
group = self._make_group(10)
age = 0.0
for p in group:
p.age = age % 2.0
age += 0.7
for f in range(5):
coords = tuple(fbtex.generate_tex_coords(group))
self.failUnless(len(coords) >= len(group) * 8, (len(coords), len(group)))
i = 0
for p, t in zip(group, times):
c = 0
while c < 3 and p.age > times[c]:
c += 1
self.assertEqual(coords[i:i + 8], coord_sets[c], "f=%s i=%s c=%s age=%s: %s != %s" %
(f, i, c, p.age, coords[i:i + 8], coord_sets[c]))
i += 8
group.update(0.2)
def test_default_r_coords(self):
from lepton.texturizer import FlipBookTexturizer
fbtex = FlipBookTexturizer(0,
coords=[(0, 0, 0.5, 0, 0.5, 0.5, 0, 0.5)],
duration=1,
dimension=3)
self.assertEqual(fbtex.tex_dimension, 3)
coords = fbtex.tex_coords
self.assertEqual(coords, ((0, 0, 0, 0.5, 0, 0, 0.5, 0.5, 0, 0, 0.5, 0),))
fbtex = FlipBookTexturizer(0,
coords=[((0.5, 0), (1, 0), (1, 0.5), (0.5, 0.5))],
duration=1,
dimension=3)
self.assertEqual(fbtex.tex_dimension, 3)
coords = fbtex.tex_coords
self.assertEqual(coords, ((0.5, 0, 0, 1, 0, 0, 1, 0.5, 0, 0.5, 0.5, 0),))
def test_3D_single_duration_loop(self):
from lepton.texturizer import FlipBookTexturizer
coord_sets = [
(0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0),
(0, 0, 0.5, 1, 0, 0.5, 1, 1, 0.5, 0, 1, 0.5),
(0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1),
]
fbtex = FlipBookTexturizer(0,
coords=coord_sets,
duration=0.1,
dimension=3,
)
self.assertEqual(fbtex.tex_dimension, 3)
self.assertAlmostEqual(fbtex.duration, 0.1)
self.failUnless(fbtex.loop)
group = self._make_group(10)
age = 0.0
for p in group:
p.age = age % 0.4
age += 0.07
times = [0.1, 0.2, 0.3]
for f in range(5):
coords = tuple(fbtex.generate_tex_coords(group))
self.failUnless(len(coords) >= len(group) * 12, (len(coords), len(group)))
i = 0
for p, t in zip(group, times):
age = p.age % times[-1]
c = 0
while c < 2 and age > times[c]:
c += 1
self.assertEqual(coords[i:i + 12], coord_sets[c], "f=%s i=%s c=%s age=%s: %s != %s" %
(f, i, c, age, coords[i:i + 12], coord_sets[c]))
i += 12
group.update(0.04)
def test_3D_single_duration_no_loop(self):
from lepton.texturizer import FlipBookTexturizer
coord_sets = [
(0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0),
(0, 0, 0.5, 1, 0, 0.5, 1, 1, 0.5, 0, 1, 0.5),
(0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1),
]
fbtex = FlipBookTexturizer(0,
coords=coord_sets,
duration=0.12,
dimension=3,
loop=False,
)
self.assertEqual(fbtex.tex_dimension, 3)
self.assertAlmostEqual(fbtex.duration, 0.12)
self.failIf(fbtex.loop)
group = self._make_group(10)
age = 0.0
for p in group:
p.age = age % 0.4
age += 0.07
times = [0.12, 0.24, 0.36]
for f in range(5):
coords = tuple(fbtex.generate_tex_coords(group))
self.failUnless(len(coords) >= len(group) * 12, (len(coords), len(group)))
i = 0
for p, t in zip(group, times):
c = 0
while c < 2 and p.age > times[c]:
c += 1
self.assertEqual(coords[i:i + 12], coord_sets[c], "f=%s i=%s c=%s age=%s: %s != %s" %
(f, i, c, p.age, coords[i:i + 12], coord_sets[c]))
i += 12
group.update(0.055)
def test_3D_duration_list_loop(self):
from lepton.texturizer import FlipBookTexturizer
coord_sets = [
(0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0),
(0, 0, 0.5, 1, 0, 0.5, 1, 1, 0.5, 0, 1, 0.5),
(0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1),
]
durations = [0.7, 0.3, 0.5]
times = []
t = 0
for d in durations:
t += d
times.append(t)
fbtex = FlipBookTexturizer(0,
coords=coord_sets,
duration=durations,
dimension=3,
)
self.assertEqual(fbtex.tex_dimension, 3)
self.failUnless(fbtex.loop)
for d, expected in zip(fbtex.duration, durations):
self.assertAlmostEqual(d, expected, 6)
group = self._make_group(10)
age = 0.0
for p in group:
p.age = age % 0.4
age += 0.07
for f in range(5):
coords = tuple(fbtex.generate_tex_coords(group))
self.failUnless(len(coords) >= len(group) * 12, (len(coords), len(group)))
i = 0
for p, t in zip(group, times):
age = p.age % times[-1]
c = 0
while c < 2 and age > times[c]:
c += 1
self.assertEqual(coords[i:i + 12], coord_sets[c], "f=%s i=%s c=%s age=%s: %s != %s" %
(f, i, c, age, coords[i:i + 12], coord_sets[c]))
i += 12
group.update(0.11)
def test_3D_duration_list_no_loop(self):
from lepton.texturizer import FlipBookTexturizer
coord_sets = [
(0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0),
(0, 0, 0.5, 1, 0, 0.5, 1, 1, 0.5, 0, 1, 0.5),
(0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1),
]
durations = [0.4, 0.4, 0.5]
times = []
t = 0
for d in durations:
t += d
times.append(t)
fbtex = FlipBookTexturizer(0,
coords=coord_sets,
duration=durations,
dimension=3,
loop=False,
)
self.assertEqual(fbtex.tex_dimension, 3)
self.failIf(fbtex.loop)
for d, expected in zip(fbtex.duration, durations):
self.assertAlmostEqual(d, expected, 6)
group = self._make_group(10)
age = 0.0
for p in group:
p.age = age % 0.5
age += 0.07
for f in range(5):
coords = tuple(fbtex.generate_tex_coords(group))
self.failUnless(len(coords) >= len(group) * 12, (len(coords), len(group)))
i = 0
for p, t in zip(group, times):
c = 0
while c < 2 and p.age > times[c]:
c += 1
self.assertEqual(coords[i:i + 12], coord_sets[c], "f=%s i=%s c=%s age=%s: %s != %s" %
(f, i, c, p.age, coords[i:i + 12], coord_sets[c]))
i += 12
group.update(0.17)
def test_invalid_args(self):
from lepton.texturizer import FlipBookTexturizer
self.assertRaises(TypeError, FlipBookTexturizer, 0, object(), 1)
self.assertRaises(TypeError, FlipBookTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0)], object())
self.assertRaises(ValueError, FlipBookTexturizer, 0, [], 1)
self.assertRaises(ValueError, FlipBookTexturizer, 0, [(0, 0)], 1)
self.assertRaises(ValueError, FlipBookTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0)], 0)
self.assertRaises(ValueError, FlipBookTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0)], -1)
self.assertRaises(ValueError, FlipBookTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0)], [])
self.assertRaises(ValueError,
FlipBookTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0)], [1, -1])
self.assertRaises(ValueError,
FlipBookTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0)], [1, 1], dimension=0)
self.assertRaises(ValueError,
FlipBookTexturizer, 0, [(0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0)], [1, 1], dimension=4)
if pyglet is not None:
def _glGet(self, what):
result = (ctypes.c_int * 1)()
glGetIntegerv(what, result)
return result[0]
def test_2D_set_state_restore_state(self):
from lepton.texturizer import FlipBookTexturizer
texture = (ctypes.c_uint * 1)()
glGenTextures(1, texture)
glDisable(GL_TEXTURE_2D)
glDisable(GL_TEXTURE_3D)
glBindTexture(GL_TEXTURE_2D, 0)
sprite_tex = FlipBookTexturizer(texture[0], [(0, 0, 0, 0, 0, 0, 0, 0)], 1)
self.assertEqual(sprite_tex.tex_dimension, 2)
self.failIf(self._glGet(GL_TEXTURE_2D))
self.failIf(self._glGet(GL_TEXTURE_3D))
self.assertEqual(self._glGet(GL_TEXTURE_BINDING_2D), 0)
sprite_tex.set_state()
self.failUnless(self._glGet(GL_TEXTURE_2D))
self.failIf(self._glGet(GL_TEXTURE_3D))
self.assertEqual(self._glGet(GL_TEXTURE_BINDING_2D), texture[0])
sprite_tex.restore_state()
self.failIf(self._glGet(GL_TEXTURE_2D))
self.failIf(self._glGet(GL_TEXTURE_3D))
def test_3D_set_state_restore_state(self):
from lepton.texturizer import FlipBookTexturizer
texture = (ctypes.c_uint * 1)()
glGenTextures(1, texture)
glDisable(GL_TEXTURE_2D)
glDisable(GL_TEXTURE_3D)
glBindTexture(GL_TEXTURE_3D, 0)
sprite_tex = FlipBookTexturizer(texture[0], [(0, 0, 0, 0, 0, 0, 0, 0)], 1, dimension=3)
self.assertEqual(sprite_tex.tex_dimension, 3)
self.failIf(self._glGet(GL_TEXTURE_2D))
self.failIf(self._glGet(GL_TEXTURE_3D))
self.assertEqual(self._glGet(GL_TEXTURE_BINDING_3D), 0)
sprite_tex.set_state()
self.failUnless(self._glGet(GL_TEXTURE_3D))
self.failIf(self._glGet(GL_TEXTURE_2D))
self.assertEqual(self._glGet(GL_TEXTURE_BINDING_3D), texture[0])
sprite_tex.restore_state()
self.failIf(self._glGet(GL_TEXTURE_2D))
self.failIf(self._glGet(GL_TEXTURE_3D))
if __name__ == '__main__':
unittest.main()
|
# 383 - Ransom Note (Easy)
# https://leetcode.com/problems/ransom-note/
from collections import Counter
class Solution(object):
def canConstruct(self, ransom_note, magazine):
"""
        :type ransom_note: str
:type magazine: str
:rtype: bool
"""
# Is it possible to build a certain ransom note starting from the letters in a magazine?
counter_ransom = Counter(ransom_note)
counter_magazine = Counter(magazine)
valid = all(counter_magazine[char] >= counter_ransom[char] for char in list(ransom_note))
return valid
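# Quick illustrative check (not part of the original submission):
if __name__ == "__main__":
    assert Solution().canConstruct("aa", "aab")
    assert not Solution().canConstruct("aa", "ab")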
|
#! /usr/bin/python
from urlparse import urlparse
def fullPath(baseUrl, link):
# converts baseUrl string to ParseResult (urlparse object)
# for constructing simple links we can ignore everything after
# last slash, i.e. on http://test.com/super.ext?mag=ic
# relative links are constructed with http://test.com/ prefix
baseUrl = urlparse(baseUrl[:baseUrl.rfind('/') + 1])
if link.startswith('http'):
'''Full link'''
return link
if link.startswith('javascript'):
'''We are not interested in following these links'''
return None
if link.startswith('/'):
return "http://" + baseUrl.netloc + link
return baseUrl.geturl() + link
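# Illustrative behaviour (URLs are made up for demonstration):
#   fullPath("http://test.com/super.ext?mag=ic", "page.html")  -> "http://test.com/page.html"
#   fullPath("http://test.com/a/b.html", "/c.html")            -> "http://test.com/c.html"
#   fullPath("http://test.com/a/b.html", "javascript:void(0)") -> None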
|
#TODO the NCDEnterprise library requires the digi Xbee library from:
#https://github.com/digidotcom/python-xbee
# someday I may make an install package, but today is not that day.
import os
from ncd_enterprise import NCDEnterprise
#TODO Change this line to your Serial Port
# SERIAL_PORT = "/dev/tty.usbserial-AC4CF4AA"
SERIAL_PORT = "/dev/cu.usbserial-AC4CF4AA"
BAUD_RATE = 115200
# This function is the callback that I pass into the NCDEnterprise module during
# instantiation. The module uses the Digi XBee module, which runs on another thread.
def my_custom_callback(sensor_data):
    if sensor_data['sensor_type_id'] == 40:
csv_dict = restructure_data(sensor_data.get('sensor_data'))
print('data acquired')
        csv_file = open(os.path.expanduser('~/vibration_data.csv'), 'a+')
csv_file.write(csv_dict.get('rms_x_csv')+"\r")
csv_file.write(csv_dict.get('rms_y_csv')+"\r")
csv_file.write(csv_dict.get('rms_z_csv')+"\r\r")
csv_file.close()
def restructure_data(data):
r_data = {'rms_x_csv': '\"RMS_X\",', 'rms_y_csv': '\"RMS_Y\",', 'rms_z_csv': '\"RMS_Z\",'}
for sample in data:
r_data['rms_x_csv'] += '\"'+str(data.get(sample)['rms_x']) +'\",'
r_data['rms_y_csv'] += '\"'+str(data.get(sample)['rms_y']) +'\",'
r_data['rms_z_csv'] += '\"'+str(data.get(sample)['rms_z']) +'\",'
return r_data
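# Illustrative shape of the result (sensor values are made up):
#   restructure_data({0: {'rms_x': 0.1, 'rms_y': 0.2, 'rms_z': 0.3}})
#     -> {'rms_x_csv': '"RMS_X","0.1",',
#         'rms_y_csv': '"RMS_Y","0.2",',
#         'rms_z_csv': '"RMS_Z","0.3",'}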
#instantiate the NCDEnterprise Object and pass in the Serial Port, Baud Rate,
# and Function/Method object
ncdModem = NCDEnterprise(SERIAL_PORT, BAUD_RATE, my_custom_callback)
# print(ncdModem.device.serial_port.rts)
|
# Anders Poirel
# 12-03-2020
#
# https://leetcode.com/problems/generate-parentheses/
#
# Runtime: 28 ms, faster than 89.93% of Python3 online submissions for Generate Parentheses.
# Memory Usage: 13.2 MB, less than 91.11% of Python3 online submissions for Generate Parentheses.
from typing import List
class Solution:
    def build_sol(self, nleft, nright, curr, sols, n):
        if nright > nleft or nleft > n:
            return
        elif len(curr) == n*2 - 2:
            sols.append("(" + curr + ")")
        else:
            self.build_sol(nleft, nright+1, curr + ")", sols, n)
            self.build_sol(nleft+1, nright, curr + "(", sols, n)
def generateParenthesis(self, n: int) -> List[str]:
sols = []
self.build_sol(1, 1, ")", sols, n)
self.build_sol(2, 0, "(", sols, n)
return sols
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('vendors', '0075_auto_20160421_0404'),
]
operations = [
migrations.RemoveField(
model_name='insuranceverification',
name='remind_me',
),
migrations.AlterField(
model_name='insuranceverification',
name='expiry_date',
field=models.DateField(null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='insuranceverification',
name='extent',
field=models.IntegerField(null=True, choices=[(1, '\u20ac2.6 million'), (2, '\u20ac6.5 million'), (3, 'Other (please specify)')]),
preserve_default=True,
),
]
|
import os
import pygame
class ResourceManager:
__instance = None
def __init__(self, path):
self.__base_path = os.path.dirname(path) + "/res/"
self.__fonts = {}
self.__graphics = {}
self.__music = {}
self.__sounds = {}
for file in os.listdir(self.__base_path + "fonts/"):
key = "fonts/" + os.path.splitext(os.path.basename(file))[0]
self.__fonts[key] = os.path.join(self.__base_path, "fonts/" + file)
for file in os.listdir(self.__base_path + "graphics/"):
key = "graphics/" + os.path.splitext(os.path.basename(file))[0]
surface = pygame.image.load(os.path.join(self.__base_path, "graphics/" + file))
self.__graphics[key] = surface
for file in os.listdir(self.__base_path + "music/"):
key = "music/" + os.path.splitext(os.path.basename(file))[0]
music = pygame.mixer.Sound(os.path.join(self.__base_path, "music/" + file))
self.__music[key] = music
for file in os.listdir(self.__base_path + "sounds/"):
key = "sounds/" + os.path.splitext(os.path.basename(file))[0]
sound = pygame.mixer.Sound(os.path.join(self.__base_path, "sounds/" + file))
self.__sounds[key] = sound
ResourceManager.__instance = self
@staticmethod
def get_instance():
return ResourceManager.__instance
def get_font(self, key, size):
return pygame.font.Font(self.__fonts[key], size)
def get_image(self, key):
return self.__graphics[key]
def get_music(self, key):
return self.__music[key]
def get_sound(self, key):
return self.__sounds[key]
|
import gamry_parser as parser
import unittest
class TestVFP600(unittest.TestCase):
def setUp(self):
pass
def test_load(self):
gp = parser.VFP600()
self.assertFalse(gp.loaded)
gp.load("tests/vfp600_data.dta")
self.assertEqual(gp.fname, "tests/vfp600_data.dta")
self.assertTrue("VFP600" in gp.get_header()["TAG"])
# data file acq frequency = 15hz
self.assertEqual(gp.get_sample_time(), 1 / 15)
self.assertEqual(gp.get_curve_count(), 1)
self.assertEqual(gp.get_sample_count(), 20)
self.assertTrue(gp.loaded)
def test_getters(self):
gp = parser.VFP600()
gp.load("tests/vfp600_data.dta")
curve = gp.get_curve_data()
self.assertTrue((curve.columns == ["T", "Voltage", "Current"]).all())
self.assertEqual(curve["T"][0], 0)
self.assertEqual(round(curve["T"].iloc[-1] * 100), 127)
self.assertEqual(curve["Voltage"].iloc[-1], 0.033333)
self.assertEqual(round(curve["Current"].iloc[-1] * 1e13), 5125)
|
# This logic is to help convert address: str to address: bytes
# copied from https://github.com/cosmos/cosmos-sdk/blob/main/types/address.go
from typing import List
import bech32
def address_to_bytes(address: str) -> bytes:
_prefix, b = bech32.bech32_decode(address)
b = from_words(b)
return bytes(b)
def bytes_to_address(b: bytes, prefix: str = "secret") -> str:
b = to_words(b)
res = bech32.bech32_encode(prefix, b)
return res
def from_words(words: List[int]) -> List[int]:
res = convert(words, 5, 8, False)
if res is None:
raise ValueError(res)
return res
def to_words(bytes: List[int]) -> List[int]:
return convert(bytes, 8, 5, True)
def convert(data: List[int], inBits: int, outBits: int, pad: bool = False) -> List[int]:
value = 0
bits = 0
maxV = (1 << outBits) - 1
result = []
for i in range(len(data)):
value = (value << inBits) | data[i]
bits += inBits
while bits >= outBits:
bits -= outBits
result.append((value >> bits) & maxV)
if pad:
if bits > 0:
result.append((value << (outBits - bits)) & maxV)
else:
if bits >= inBits:
raise ValueError("Excess Padding")
if ((value << outBits - bits)) & maxV:
raise ValueError("Non-zero padding")
return result
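# Illustrative round trip through the 8-bit <-> 5-bit regrouping (values are
# arbitrary and only meant to show the expected shapes):
if __name__ == "__main__":
    assert to_words([255]) == [31, 28]      # 11111111 -> 11111, then 111 padded with 00
    assert from_words([31, 28]) == [255]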
|
import json
import six
def iterate_dict(dictionary, parents=[]):
"""
This function iterates over one dict and returns a list of tuples: (list_of_keys, value)
    Useful for looping through a multidimensional dictionary.
"""
ret = []
for key, value in six.iteritems(dictionary):
if isinstance(value, dict):
ret.extend(iterate_dict(value, parents + [str(key)]))
elif isinstance(value, list):
ret.append((parents + [str(key)], value))
else:
ret.append((parents + [str(key)], value))
return ret
def unnest_json(row_obj):
"""
    Iterates over a JSON object to transform each element into a column.
Example:
{'a': {'b': 'c'}} -> {'a.b': 'c'}
"""
row = {}
for keys, value in iterate_dict(row_obj):
row[".".join(keys)] = value if value is not None else ''
return row
def log(*args):
for thing in args:
if type(thing) is dict:
thing = json.dumps(thing)
print('Salesforce plugin - %s' % thing)
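# Illustrative usage of the helpers above (values are made up):
if __name__ == "__main__":
    assert iterate_dict({'a': {'b': 'c'}}) == [(['a', 'b'], 'c')]
    assert unnest_json({'a': {'b': 'c'}, 'd': None}) == {'a.b': 'c', 'd': ''}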
|
import os
class Config():
ENV_AWS_DEFAULT_REGION = 'AWS_DEFAULT_REGION'
ENV_NGROK_BASE_AGENT_URL = 'NGROK_BASE_AGENT_URL'
ENV_SSM_PREFIX = 'SSM_PREFIX'
defaults = {
ENV_AWS_DEFAULT_REGION: 'us-east-2',
ENV_NGROK_BASE_AGENT_URL: 'http://localhost:4040/api',
ENV_SSM_PREFIX: '/ngrok_domains',
}
def __init__(self):
self.cache = {
self.ENV_AWS_DEFAULT_REGION: os.environ.get(self.ENV_AWS_DEFAULT_REGION),
self.ENV_NGROK_BASE_AGENT_URL: os.environ.get(self.ENV_NGROK_BASE_AGENT_URL),
self.ENV_SSM_PREFIX: os.environ.get(self.ENV_SSM_PREFIX)
}
for key, value in self.cache.items():
            if (value is None or value == '') and key in self.defaults:
self.cache[key] = self.defaults[key]
if self.cache[self.ENV_SSM_PREFIX].endswith('/'):
self.cache[self.ENV_SSM_PREFIX] = self.cache[self.ENV_SSM_PREFIX][:-1]
def get(self, keyName):
if not keyName in self.cache:
return None
return self.cache[keyName]
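# Minimal usage sketch (guarded so it only runs when executed directly; the
# printed values assume the environment variables above are unset, so the
# defaults apply):
if __name__ == '__main__':
    cfg = Config()
    print(cfg.get(Config.ENV_SSM_PREFIX))   # '/ngrok_domains' unless overridden
    print(cfg.get('UNKNOWN_KEY'))           # None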
|
import debug
import tensorflow as tf
import time
import os
from .. import data
from .. import error
from .. import helpers
from .. import internal
from .. import network
from .. import checkpoint
from .. import layer
class CTrainer(internal.CBaseRunner):
def __init__(self, Network, Reader, ErrorMeasurement, Settings = None):
super().__init__(Settings)
debug.Assert(isinstance(Network, network.CNetwork), "You must specify a Network object.")
debug.Assert(isinstance(Reader, data.CReader), "You must specify a Reader object.")
debug.Assert(isinstance(ErrorMeasurement, error.CMeasurement), "You must specify an ErrorMeasurement object.")
self._Network = Network
self._Reader = Reader
self._ErrorMeasurement = ErrorMeasurement
self._Printer = None
self._SummaryMerger = None
self._IsReady = False
self._OptimizerStep = None
self._prepare(self._Settings)
def _prepare(self, Settings):
if not self._IsReady:
self._OptimizerStep = self._createOptimizer(self._ErrorMeasurement, Settings)
Variables, Tensors = helpers.getTrainableVariables()
print("Current Model has {} parameters in {} trainable tensors.".format(Variables, Tensors))
self.reset(self.getCheckpointDir())
self._Summary = tf.summary.merge_all()
self._IsReady = True
def train(self, NumberOfEpochs = None):
Session = self._Session
        # Init writers if necessary
TrainWriter = None
ValWriter = None
if self._SummaryDir != None:
print("Store tensorboard summary at directory {}".format(self._SummaryDir))
TrainWriter = tf.summary.FileWriter(os.path.join(self._SummaryDir, "train"))
TrainWriter.add_graph(Session.graph)
ValWriter = tf.summary.FileWriter(os.path.join(self._SummaryDir, "val"))
ValWriter.add_graph(Session.graph)
else:
print("Do not store any summary")
# Store settings
if not os.path.exists(self.getCheckpointDir()):
os.makedirs(self.getCheckpointDir())
Filename = os.path.join(self.getCheckpointDir(), "train.cfg")
print("Store training settings in file {}".format(Filename))
with open(Filename, "w") as File:
File.write(str(self._Settings))
# Start queues
QueueCoordinage = tf.train.Coordinator()
tf.train.start_queue_runners(sess=Session, coord=QueueCoordinage)
# Calculate number of epochs to run
MaxEpochs = self.getMaxEpochs() - self._EpochCount
if NumberOfEpochs != None:
MaxEpochs = min([NumberOfEpochs, MaxEpochs])
# Setup Printer
if self._Printer != None:
self._Printer.setupTraining(self.getMaxEpochs())
# Loop Preparation
BatchSize = self._Reader.getBatchSize()
Epoch = self._EpochCount
IterationsPerEpoch = helpers.getIterationsPerEpoch(self.getEpochSize(), BatchSize)
Iteration = Epoch * IterationsPerEpoch
print("Run training for {} epochs beginning with epoch {} and {} iterations per epoch.".format(MaxEpochs, self._EpochCount, IterationsPerEpoch))
# Initial Eval Step
StartTime = time.time()
SummaryResult, OtherResults = self._internalEvalStep(Session, Iteration, 0, Epoch)
Writer = TrainWriter
if Epoch > 0:
            # Do not write to the summary, since it has already been written by the training before
Writer = None
self._postEpochAction(Writer, SummaryResult, OtherResults, StartTime, Iteration, Epoch, BatchSize)
SummaryResult = self._internalValidationStep(Session, Iteration, 0, Epoch)
Writer = ValWriter
if Epoch > 0:
            # Do not write to the summary, since it has already been written by the training before
Writer = None
self._postValidationAction(Writer, SummaryResult, Iteration, Epoch, BatchSize)
# Training Loop
StartTime = time.time()
for EpochNumber in range(MaxEpochs):
Epoch = EpochNumber + self._EpochCount + 1
SampleCount = 0
for Batch in range(IterationsPerEpoch):
Iteration += 1
SampleCount += BatchSize
self._printTrainingBar(20, Iteration, Epoch, Batch, IterationsPerEpoch, True)
self._internalTrainStep(Session, Iteration, Batch, Epoch)
SummaryResult, OtherResults = self._internalEvalStep(Session, Iteration, 0, Epoch)
StartTime = self._postEpochAction(TrainWriter, SummaryResult, OtherResults, StartTime, Iteration, Epoch, SampleCount)
SummaryResult = self._internalValidationStep(Session, Iteration, 0, Epoch)
self._postValidationAction(ValWriter, SummaryResult, Iteration, Epoch, BatchSize)
self._saveCheckpoint(Epoch, EpochNumber == MaxEpochs)
self._EpochCount = Epoch
# Stop queues
QueueCoordinage.request_stop()
QueueCoordinage.join()
# Close writer
if TrainWriter != None:
TrainWriter.close()
# Close writer
if ValWriter != None:
ValWriter.close()
def _internalEvalStep(self, Session, Iteration, Batch, Epoch):
RunTargets = [self._Summary]
RawResults = list(self._trainIteration(Session, RunTargets, self._Reader, Iteration, Batch, Epoch))
SummaryResult = RawResults[0]
if len(RawResults) > 1:
OtherResults = RawResults[1:]
else:
OtherResults = []
return SummaryResult, OtherResults
def _internalValidationStep(self, Session, Iteration, Batch, Epoch):
RunTargets = [self._Summary]
IsTraining = self._Reader.IsTraining
self._Reader.IsTraining = False
#print("Validate {} Iterations...".format(self.getValidationIterations(self._Settings)))
IterationsPerStep = self.getValidationIterations(self._Settings)
for i in range(IterationsPerStep):
self._printTrainingBar(20, Iteration, Epoch, i, IterationsPerStep, False)
RawResults = list(self._trainIteration(Session, RunTargets, self._Reader, Iteration, Batch, Epoch))
SummaryResult = RawResults[0]
if self._SummaryMerger != None:
self._SummaryMerger.add(SummaryResult)
self._Reader.IsTraining = IsTraining
if self._SummaryMerger != None:
SummaryResult = self._SummaryMerger.merge()
return SummaryResult
def _internalTrainStep(self, Session, Iteration, Batch, Epoch):
RunTargets = [self._OptimizerStep]
RawResults = list(self._trainIteration(Session, RunTargets, self._Reader, Iteration, Batch, Epoch))
return RawResults
def _postValidationAction(self, Writer, Summary, Iteration, Epoch, SampleCount):
if (Summary != None) and (Writer != None):
Writer.add_summary(Summary, Epoch)
if self._Printer != None:
self._Printer.printValidationUpdate(Summary, Iteration, Epoch, SampleCount)
def _postEpochAction(self, Writer, Summary, OtherResults, StartTime, Iteration, Epoch, SampleCount):
Duration = (time.time() - StartTime)
StartTime = time.time()
if (Summary != None) and (Writer != None):
Writer.add_summary(Summary, Epoch)
if self._Printer != None:
self._Printer.printEpochUpdate(Summary, Iteration, Epoch, Duration, SampleCount)
return StartTime
def _saveCheckpoint(self, Epoch, IsForceSave = False):
EpochsUntilCheckpoint = self.getEpochsUntilCheckpoint()
IsSave = False
if EpochsUntilCheckpoint != None:
if Epoch % EpochsUntilCheckpoint == 0:
IsSave = True
if IsSave or IsForceSave:
self.saveModel(self.getCheckpointDir(), Epoch)
def restore(self, Epoch=None):
if Epoch is None:
CheckpointFile = checkpoint.getLatestCheckpointFile(self.getCheckpointDir())
else:
CheckpointFile = checkpoint.getCheckpointFilename(self.getCheckpointDir(), Epoch)
debug.Assert(CheckpointFile != None, "Cannot find checkpoint file {}.".format(CheckpointFile))
super().restore(CheckpointFile)
def getMaxEpochs(self):
return self._getMaxEpochs(self._Settings)
def getEpochSize(self):
return self._getEpochSize(self._Settings)
def getCheckpointDir(self):
return self._getCheckpointDir(self._Settings)
def getEpochsUntilCheckpoint(self):
return self._getEpochsUntilCheckpoint(self._Settings)
def _trainIteration(self, Session, RunTargets, Reader, Iteration, Batch, Epoch):
raise Exception("You have to overwride this method and run a training iteration inside.")
# Return the results here
return None, None
def _createOptimizer(self, ErrorMeasurement, Settings):
raise Exception("You have to overwride this method and create an optimizer step to return.")
return None
def _getMaxEpochs(self, Settings):
        # You have to override this method to return the maximum number of epochs.
return Settings['Trainer']['NumberOfEpochs']
def _getEpochSize(self, Settings):
        # You have to override this method to return the epoch size.
return Settings['Trainer']['EpochSize']
def _getSummaryDir(self, Settings):
        # You can override this function to specify a summary directory
if 'Trainer' in Settings:
if 'SummaryPath' in Settings['Trainer']:
return Settings['Trainer']['SummaryPath']
return None
def _getCheckpointDir(self, Settings):
        # You can override this function to specify a checkpoint directory
if 'Trainer' in Settings:
if 'CheckpointPath' in Settings['Trainer']:
return os.path.join(Settings['Trainer']['CheckpointPath'], "State_{}".format(self._Network.State))
return None
def _getEpochsUntilCheckpoint(self, Settings):
        # You can override this function to specify the number of epochs until a checkpoint is stored
if 'Trainer' in Settings:
if 'CheckpointEpochs' in Settings['Trainer']:
return Settings['Trainer']['CheckpointEpochs']
return None
def getValidationIterations(self, Settings):
        # You can override this function to specify the number of validation iterations per validation run
if 'Validation' in Settings:
if 'Samples' in Settings['Validation']:
return int(Settings['Validation']['Samples']/self._Reader.getBatchSize())
return 1
def _printTrainingBar(self, BarSize, Iteration, Epoch, Batch, IterationsPerEpoch, IsTraining=True):
Percent = Batch/IterationsPerEpoch
Bar = '.' * int((BarSize*Percent))
BarString = str("{:<"+str(BarSize)+"}").format(Bar)
if IsTraining:
Prefix = str("Training Epoch {}").format(Epoch)
else:
Prefix = str("Validation Epoch {}").format(Epoch)
print("\r{:>8}: ({}) [{}] - {} / {}".format(Iteration, Prefix, BarString, Batch, IterationsPerEpoch), end='', flush=True)
print("\r{:>8}: ({}) [{}] - {} / {}".format(Iteration, Prefix, BarString, Batch, IterationsPerEpoch), end='', flush=True)
if Batch >= (IterationsPerEpoch-1):
print("\r", end='', flush=True)
def _applyNoise(self, Gradients, GradientNoise):
if GradientNoise is not None and GradientNoise > 0.0:
            NoiseLevel = GradientNoise / (tf.sqrt(tf.cast(tf.train.get_or_create_global_step() + 1, tf.float32)))
NoisyGradients = []
print("Apply noise to gradients (nu = {})...".format(GradientNoise))
# Taken from: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/optimizers.py
for Gradient, Variable in Gradients:
if Gradient is not None:
if isinstance(Gradient, tf.IndexedSlices):
GradientShape = Gradient.dense_shape
else:
GradientShape = Gradient.get_shape()
Noise = tf.truncated_normal(GradientShape) * NoiseLevel
Gradient += Noise
NoisyGradients.append((Gradient, Variable))
else:
NoiseLevel = 0
NoisyGradients = Gradients
tf.summary.scalar("NoiseLevel", NoiseLevel)
return NoisyGradients
def _applyIndiviualLearningRates(self, Gradients):
print("Apply individual learning rate scales...")
ScaledGradients = []
for Gradient, Variable in Gradients:
Scale = layer.LearningRates.get(Variable.name)
if Scale != None:
Gradient *= Scale
print(" * \"{}\" has scale {}".format(Variable.name, Scale))
ScaledGradients.append((Gradient, Variable))
return ScaledGradients
def _addSumGradientSummary(self, Gradients):
Sum = 0.0
for Gradient, Variable in Gradients:
Sum += tf.norm(Gradient)
tf.summary.scalar("GradientNorm", Sum)
def _addSingleGradientSummary(self, Gradients):
for Gradient, Variable in Gradients:
tf.summary.scalar(Variable.name, tf.norm(Gradient))
def _addGradientNoiseSummary(self, Gradients, NoisyGradients):
        for i, (Gradient, Variable) in enumerate(Gradients):
            tf.summary.scalar(Variable.name, tf.norm(NoisyGradients[i][0]) - tf.norm(Gradient)) |
from abc import ABC, abstractmethod
from typing import List, Optional
import torch
from .options import OptionBridge
from .registry import Registry
class BaseConfig(ABC):
def __init__(self):
self._params_list = None
@Registry("base_config")
    def _named_params(self) -> dict:
self._params_list = {key:value for key, value in vars(self).items() if not key.startswith('_')}
return self._params_list
def params(self):
return {key: getattr(self,key)() for key in self._params_list}
class ModelConfig(BaseConfig):
def __init__(self, input_size: Optional[int] = 20000, hidden_size : Optional[int] = 256,
num_layers: Optional[int] = 2,
dropout_p: Optional[float] = 0.4):
super(ModelConfig, self).__init__()
self.input_size = OptionBridge[int]("input_size", input_size,
desc="Size of the input "\
+"layer, this usually is the size of the "\
+ "vocab in case of text modality.",
required=False)
self.hidden_size = OptionBridge[int]("hidden_size", hidden_size,
desc="Size of the"\
+" hidden layer", required=False)
self.num_layers = OptionBridge[int]("num_layers", num_layers, desc="Number of "\
+ " layers of encoder", required=False)
self.dropout_p = OptionBridge[float]("dropout_p", dropout_p,
desc="dropout probability", required=False)
super(ModelConfig, self)._named_params()
class TrainConfig(BaseConfig):
def __init__(self, lr=1.0, gamma=0.9, momentum=0.9, clip_value=0.25,
num_epochs=5, log_interval=10, device='cpu',
checkpoint_dir='./models'):
super(TrainConfig, self).__init__()
self.lr = OptionBridge[float]("lr", lr, desc="learning rate", required=False)
self.gamma = OptionBridge[float]("gamma", gamma, desc="gamma for lr scheduler",
required=False)
self.momentum = OptionBridge[float]("momentum", momentum, desc="Momentum value",
required=False)
self.clip_value = OptionBridge[float]("clip_value", clip_value,
desc="clip value for grad", required=False)
self.num_epochs = OptionBridge[int]("num_epochs", num_epochs,
desc="Number of epochs", required=False)
self.log_interval = OptionBridge[int]("log_interval", log_interval,
desc="Interval between logs",
required=False)
self.device = OptionBridge[str]("device", device, desc="device: cpu or cuda",
required=False, choices=['cpu', 'cuda'])
self.checkpoint_dir = OptionBridge[str]("checkpoint_dir", checkpoint_dir,
desc="Directory for checkpointing")
super(TrainConfig, self)._named_params()
class DataConfig(BaseConfig):
def __init__(self, data_dir, train_batch_size=24, valid_batch_size=24,
num_workers=0, shuffle_data=True, drop_last=True):
super(DataConfig, self).__init__()
self.data_dir = OptionBridge[str]("data_dir", data_dir,
desc="directory where data is located",
required=True)
self.train_batch_size = OptionBridge[int]("train_batch_size", train_batch_size,
desc="Batch Size for training",
default=24)
self.valid_batch_size = OptionBridge[int]("valid_batch_size", valid_batch_size,
desc="Batch Size for validation data",
default=24)
self.shuffle_data = OptionBridge[bool]("shuffle_data", shuffle_data,
desc="data is shuffled if true",
default=True)
self.drop_last = OptionBridge[bool]("drop_last", drop_last,
desc="left over samples that don't fit "\
+ "in a batch are dropped if true",
default=True)
self.num_workers = OptionBridge[int]("num_workers", num_workers,
desc="Number of workers required "\
+" to load the data",
default= 0)
super(DataConfig, self)._named_params()
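# Sketch of how these configs appear to be consumed (the OptionBridge call
# semantics are assumed from context here, not verified against its implementation):
#   model_cfg = ModelConfig(hidden_size=512)
#   model_cfg.params()   # -> {'input_size': ..., 'hidden_size': ..., 'num_layers': ..., 'dropout_p': ...}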
|
import numpy as np
import struct
import os
import csv
def hex_to_ieee_fp(hex_str):
hex_str = hex_str.strip()
if hex_str[0:2] in ["0x", "0X"]:
hex_str = hex_str[2:]
return struct.unpack(">f", bytes.fromhex(hex_str))[0]
def str_to_bool(bool_str):
if bool_str.strip() in ["true", "True", "1", "0x1"]:
return True
elif bool_str.strip() in ["false", "False", "0", "0x0"]:
return False
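# Illustrative conversions (hex values chosen for demonstration):
#   hex_to_ieee_fp("0x3f800000") -> 1.0
#   hex_to_ieee_fp("40490fdb")   -> ~3.1415927 (single-precision pi)
#   str_to_bool(" true ")        -> True
#   str_to_bool("0x0")           -> False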
def check_ieee_fp_a_gt_b(output_path):
test_successful = True
with open(os.path.join(output_path, "results.dat"), 'r') as res_file:
results_data = csv.reader(res_file)
next(results_data, None) # skip header
for row in results_data:
idx = int(row[0].strip())
fp_a = hex_to_ieee_fp(row[1])
fp_b = hex_to_ieee_fp(row[2])
result = str_to_bool(row[3])
if (fp_a > fp_b) != result:
test_successful = False
print("index={}: {} > {} = {} not correct!".format(idx, fp_a, fp_b, result))
result = str_to_bool(row[4])
if (fp_a > fp_a) != result:
test_successful = False
print("index={}: {} > {} = {} not correct!".format(idx, fp_a, fp_a, result))
return test_successful
def check_ieee_fp_a_lt_b(output_path):
test_successful = True
with open(os.path.join(output_path, "results.dat"), 'r') as res_file:
results_data = csv.reader(res_file)
next(results_data, None) # skip header
for row in results_data:
idx = int(row[0].strip())
fp_a = hex_to_ieee_fp(row[1])
fp_b = hex_to_ieee_fp(row[2])
result = str_to_bool(row[3])
if (fp_a < fp_b) != result:
test_successful = False
print("index={}: {} < {} = {} not correct!".format(idx, fp_a, fp_b, result))
result = str_to_bool(row[4])
if (fp_a < fp_a) != result:
test_successful = False
print("index={}: {} < {} = {} not correct!".format(idx, fp_a, fp_a, result))
return test_successful
def check_ieee_fp_a_ge_b(output_path):
test_successful = True
with open(os.path.join(output_path, "results.dat"), 'r') as res_file:
results_data = csv.reader(res_file)
next(results_data, None) # skip header
for row in results_data:
idx = int(row[0].strip())
fp_a = hex_to_ieee_fp(row[1])
fp_b = hex_to_ieee_fp(row[2])
result = str_to_bool(row[3])
if (fp_a >= fp_b) != result:
test_successful = False
print("index={}: {} >= {} = {} not correct!".format(idx, fp_a, fp_b, result))
result = str_to_bool(row[4])
if (fp_a >= fp_a) != result:
test_successful = False
print("index={}: {} >= {} = {} not correct!".format(idx, fp_a, fp_a, result))
return test_successful
def check_ieee_fp_a_le_b(output_path):
test_successful = True
with open(os.path.join(output_path, "results.dat"), 'r') as res_file:
results_data = csv.reader(res_file)
next(results_data, None) # skip header
for row in results_data:
idx = int(row[0].strip())
fp_a = hex_to_ieee_fp(row[1])
fp_b = hex_to_ieee_fp(row[2])
result = str_to_bool(row[3])
if (fp_a <= fp_b) != result:
test_successful = False
print("index={}: {} <= {} = {} not correct!".format(idx, fp_a, fp_b, result))
result = str_to_bool(row[4])
if (fp_a <= fp_a) != result:
test_successful = False
print("index={}: {} <= {} = {} not correct!".format(idx, fp_a, fp_a, result))
return test_successful |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2015 Peter Williams <[email protected]> and collaborators.
# Licensed under the MIT License.
"""jupyter - extensions for the Jupyter/IPython project
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str ('JupyterTool commandline').split ()
from ... import PKError, cli
from ...io import Path
from ...cli import multitool
import logging, sys
from notebook import notebookapp
class BgNotebookApp (notebookapp.NotebookApp):
def initialize (self, argv=None):
self.port = 0 # => auto-choose port
self.open_browser = False
super (BgNotebookApp, self).initialize (argv)
def _log_level_default (self):
return logging.ERROR
def init_webapp (self):
super (BgNotebookApp, self).init_webapp ()
# Update fields to reflect the port that was actually chosen.
sock = list (self.http_server._sockets.values ())[0]
self.port = sock.getsockname ()[1]
def get_server_cwd ():
return Path ('~').expand (user=True)
def get_server_info ():
servercwd = get_server_cwd ()
for info in notebookapp.list_running_servers ():
if Path (info['notebook_dir']) == servercwd:
return info
return None
# Command-line interface
class BgNotebookCommand (multitool.Command):
name = 'bg-notebook'
argspec = ''
summary = 'Start a standardized notebook server in the background if needed.'
help_if_no_args = False
more_help = '''\
A new server will only be started if necessary. Regardless of whether a server
was started or not, the only thing that will be printed is the base URL at
which the server may be accessed. Unlike the standard setup, the port on which
the server listens will probably not be 8888, because we ask the OS to
determine it automatically. '''
def invoke (self, args, **kwargs):
import os
if len (args):
            raise multitool.UsageError ('bg-notebook takes no arguments')
# See if there's a running server that we can suggest.
servercwd = get_server_cwd ()
for info in notebookapp.list_running_servers ():
if Path (info['notebook_dir']) == servercwd:
print (info['url'])
return
# OK, need to start a server
info = cli.fork_detached_process ()
if info.whoami == 'original':
url = info.pipe.readline ().strip ()
if not len (url):
cli.die ('notebook server (PID %d) appears to have crashed', info.forkedpid)
print (url.decode ('ascii'))
else:
# We're the child. Set up to run as a background daemon as much as
# possible, then indicate to the parent that things look OK. NOTE:
# notebook's `argv` should not include what's traditionally called
# `argv[0]`.
os.chdir (str (servercwd))
app = BgNotebookApp.instance ()
app.initialize (argv=[])
info.pipe.write (app.display_url.encode ('ascii'))
info.pipe.write (b'\n')
info.pipe.close ()
with open (os.devnull, 'rb') as devnull:
os.dup2 (devnull.fileno (), 0)
with open (os.devnull, 'wb') as devnull:
for fd in 1, 2:
os.dup2 (devnull.fileno (), fd)
# Enter the main loop, never to leave again.
app.start ()
class GetNotebookPidCommand (multitool.Command):
name = 'get-notebook-pid'
argspec = ''
summary = 'Print the PID of the currently running notebook server, if any.'
help_if_no_args = False
more_help = '''\
If no server is currently running, a message is printed to standard error but
nothing is printed to stdout. Furthermore the exit code in this case is 1.'''
def invoke (self, args, **kwargs):
if len (args):
raise multitool.UsageError ('get-notebook-pid takes no arguments')
info = get_server_info ()
if info is None:
print ('(no notebook server is currently running)', file=sys.stderr)
sys.exit (1)
print (info['pid'])
class KillNotebookCommand (multitool.Command):
name = 'kill-notebook'
argspec = ''
summary = 'Kill the currently running notebook server, if any.'
help_if_no_args = False
more_help = '''\
If no server is currently running, a warning is printed to standard error, and
the exit code is 1.'''
def invoke (self, args, **kwargs):
if len (args):
raise multitool.UsageError ('kill-notebook takes no arguments')
info = get_server_info ()
if info is None:
print ('(no notebook server is currently running)', file=sys.stderr)
sys.exit (1)
# Not sure what Jupyter does when it gets SIGTERM, but to be safe let's
# shut down everything
from requests import request
from notebook.utils import url_path_join as ujoin
def command (verb, *paths):
resp = request (verb, ujoin (info['url'], *paths))
resp.raise_for_status ()
return resp
for sessinfo in command ('GET', 'api/sessions').json ():
command ('DELETE', 'api/sessions', sessinfo['id'])
for kerninfo in command ('GET', 'api/kernels').json ():
command ('DELETE', 'api/kernels', kerninfo['id'])
import os, signal
os.kill (info['pid'], signal.SIGTERM)
class JupyterTool (multitool.Multitool):
cli_name = 'pkenvtool jupyter'
summary = 'Helpers for the Jupyter environment.'
def commandline (argv):
from six import itervalues
tool = JupyterTool ()
tool.populate (itervalues (globals ()))
tool.commandline (argv)
|
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='b4rpipe',
packages=['b4rpipe'],
version='0.1.3',
license='MIT',
    install_requires=['numpy','scipy','astropy','matplotlib','netcdf4','python-casacore','tqdm','scikit-learn'],
author='astroysmr',
author_email='[email protected]',
url='https://github.com/LMT-heterodyne-dev/b4rpipe',
    description='Pipeline reduction tools for B4R (2mm heterodyne receiver) on LMT 50m @Mexico',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='B4R LMT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
|
"""empty message
Revision ID: 16b50a2c53b
Revises: 4a7e5abdf57
Create Date: 2015-12-21 14:51:48.537902
"""
# revision identifiers, used by Alembic.
revision = '16b50a2c53b'
down_revision = '4a7e5abdf57'
from alembic import op
import sqlalchemy as sa
def upgrade():
connection = op.get_bind()
### commands auto generated by Alembic - please adjust! ###
op.add_column('departments', sa.Column('short_name', sa.String(length=80), nullable=True))
op.create_unique_constraint("dept_short_name", 'departments', ['short_name'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint("dept_short_name", 'departments', type_='unique')
op.drop_column('departments', 'short_name')
### end Alembic commands ###
|
class ArraySlicer:
def __init__(self, array):
self.array = array
def slice_into(self, slices):
if (slices < 0):
raise ValueError("was asked for %d slices, expected a minimum of 0" % slices)
if (slices == 0):
return []
slice_base_size, remaining = divmod(len(self.array), slices)
cursor = 0
output = []
for index in range(slices):
slice_size = slice_base_size
if (index < remaining):
slice_size += 1
slice_start = cursor
slice_end = slice_start + slice_size
cursor = slice_end
output.append(self.array[slice_start:slice_end])
return output |
import time
import sys
import cv2
from helper import utilty as util
class Timer(object):
def __init__(self, name):
self.name = name
self.start = 0
def __enter__(self):
self.start = time.time()
def __exit__(self, *a, **kw):
print('%s took %.3f seconds' % (self.name, time.time() - self.start))
def do_util(src, dest):
with Timer('util: load'):
inp = util.load_image(src)
with Timer('util: resize'):
resized = util.resize_image_by_pil(inp, 2)
with Timer('util: extract Y'):
only_y = util.convert_rgb_to_y(inp)
only_y = util.convert_rgb_to_y(resized) # simulate upscale
with Timer('util: rgb => YCbCr'):
scaled_ycbcr_image = util.convert_rgb_to_ycbcr(resized)
with Timer('util: Y + YCbCr -> rgb'):
image = util.convert_y_and_cbcr_to_rgb(only_y, scaled_ycbcr_image[:, :, 1:3])
with Timer('util: save'):
util.save_image(dest, image)
def do_cv(src, dest):
with Timer('cv2: load'):
inp = cv2.imread(src)
with Timer('cv2: resize'):
resized = cv2.resize(inp, (inp.shape[1] * 2, inp.shape[0] * 2))
with Timer('cv2: extract Y'):
        ycbcr = cv2.cvtColor(inp, cv2.COLOR_RGB2YCR_CB)
        # cv2.split on a YCrCb image returns channels in (Y, Cr, Cb) order
        only_y, only_Cr, only_Cb = cv2.split(ycbcr)
        # simulate upscale
        ycbcr = cv2.cvtColor(resized, cv2.COLOR_RGB2YCR_CB)
        only_y, only_Cr, only_Cb = cv2.split(ycbcr)
    with Timer('cv2: rgb => YCbCr'):
        scaled_ycbcr_image = cv2.cvtColor(resized, cv2.COLOR_RGB2YCR_CB)
    with Timer('cv2: Y + YCbCr -> rgb'):
        new_y, new_Cr, new_Cb = cv2.split(scaled_ycbcr_image)
        image = cv2.merge((only_y, new_Cr, new_Cb))
image = cv2.cvtColor(image, cv2.COLOR_YCR_CB2RGB)
with Timer('cv2: save'):
cv2.imwrite(dest, image)
if __name__ == '__main__':
src = sys.argv[1:]
if not src:
sys.exit('Usage: %s test-image.png [test-image1.png]' % sys.argv[0])
for img in src:
with Timer('util convert'):
do_util(img, 'b_util.png')
print('-' * 10)
with Timer('cv2 convert'):
do_cv(img, 'b_cv.png')
|
"""Intro to Python - Part 1 - Hands-On Exercise."""
import math
import random
# TODO: Write a print statement that displays both the type and value of `pi`
pi = math.pi
print("The value of pi is:", pi, "and its type is:", type(pi))  # Method 1
printpi = "The value of pi is: {} and its type is: {}".format(pi, type(pi))  # Method 2
print(printpi)
# TODO: Write a conditional to print out if `i` is less than or greater than 50
i = random.randint(0, 100)
if i < 50:
    print(i, "is less than 50")
elif i > 50:
    print(i, "is greater than 50")
else:
    print(i, "is equal to 50")
# TODO: Write a conditional that prints the color of the picked fruit
picked_fruit = random.choice(['orange', 'lemon', 'banana'])
if picked_fruit == 'orange':
    color = "orange"
    print("Your fruit is an {} and its color is {}".format(picked_fruit, color))
elif picked_fruit == 'lemon':
    color = "green"
    print("Your fruit is a {} and its color is {}".format(picked_fruit, color))
else:
    color = "yellow"
    print("Your fruit is a {} and its color is {}".format(picked_fruit, color))
# TODO: Write a function that multiplies two numbers and returns the result
# Define the function here.
i = random.randint(0, 100)
n = random.randint(0, 100)
def multiplier(num1, num2):
    result = num1 * num2
    return result
print("The random multiplier result is:", multiplier(i, n))
# TODO: Now call the function a few times to calculate the following answers
print("12 x 96 =",multiplier(12,96))
print("48 x 17 =",multiplier(48,17))
print("196523 x 87323 =",multiplier(196523,87323))
|
import os
import os.path as osp
import torch
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from skimage import io
from tqdm import tqdm
from core.trainer import Trainer
from utils.data_utils.misc import (
to_array, to_pseudo_color,
normalize_minmax, normalize_8bit,
quantize_8bit as quantize,
)
from utils.utils import build_schedulers, HookHelper, FeatureContainer
from utils.metrics import (Meter, Precision, Recall, Accuracy, F1Score)
class CDTrainer(Trainer):
def __init__(self, settings):
super().__init__(settings['model'], settings['dataset'], settings['criterion'], settings['optimizer'], settings)
# Set up tensorboard
self.tb_on = (hasattr(self.logger, 'log_path') or self.debug) and self.ctx['tb_on']
if self.tb_on:
if hasattr(self.logger, 'log_path'):
tb_dir = self.path(
'log',
osp.join('tb', osp.splitext(osp.basename(self.logger.log_path))[0], '.'),
name='tb',
auto_make=True,
suffix=False
)
else:
tb_dir = self.path(
'log',
osp.join('tb', 'debug', '.'),
name='tb',
auto_make=True,
suffix=False
)
for root, dirs, files in os.walk(self.gpc.get_dir('tb'), False):
for f in files:
os.remove(osp.join(root, f))
for d in dirs:
os.rmdir(osp.join(root, d))
self.tb_writer = SummaryWriter(tb_dir)
self.logger.show_nl("TensorBoard logdir: {}\n".format(osp.abspath(self.gpc.get_dir('tb'))))
self.tb_intvl = self.ctx['tb_intvl']
# Global steps
self.train_step = 0
self.eval_step = 0
# Whether to save network output
self.out_dir = self.ctx['out_dir']
self.save = self.ctx['save_on'] and not self.debug
# Build lr schedulers
self.sched_on = self.ctx['sched_on'] and self.is_training
if self.sched_on:
self.schedulers = build_schedulers(self.ctx['schedulers'], self.optimizer)
self._init_trainer()
def init_learning_rate(self):
if not self.sched_on:
return super().init_learning_rate()
else:
for idx, sched in enumerate(self.schedulers):
if self.start_epoch > 0:
if isinstance(sched, torch.optim.lr_scheduler.ReduceLROnPlateau):
self.logger.warn("The old state of lr scheduler {} will not be restored.".format(idx))
continue
# Restore previous state
# FIXME: This will trigger pytorch warning "Detected call of `lr_scheduler.step()`
# before `optimizer.step()`" in pytorch 1.1.0 and later.
# Perhaps I should store the state of scheduler to a checkpoint file and restore it from disk.
last_epoch = self.start_epoch
while sched.last_epoch < last_epoch:
sched.step()
return self.optimizer.param_groups[0]['lr']
def adjust_learning_rate(self, epoch, acc):
if not self.sched_on:
return super().adjust_learning_rate(epoch, acc)
else:
for sched in self.schedulers:
if isinstance(sched, torch.optim.lr_scheduler.ReduceLROnPlateau):
sched.step(acc)
else:
sched.step()
return self.optimizer.param_groups[0]['lr']
def train_epoch(self, epoch):
losses = Meter()
len_train = len(self.train_loader)
width = len(str(len_train))
start_pattern = "[{{:>{0}}}/{{:>{0}}}]".format(width)
pb = tqdm(self.train_loader)
self.model.train()
for i, (t1, t2, tar) in enumerate(pb):
t1, t2, tar = self._prepare_data(t1, t2, tar)
show_imgs_on_tb = self.tb_on and (i%self.tb_intvl == 0)
fetch_dict = self._set_fetch_dict()
out_dict = FeatureContainer()
with HookHelper(self.model, fetch_dict, out_dict, hook_type='forward_out'):
out = self.model(t1, t2)
pred = self._process_model_out(out)
loss = self.criterion(pred, tar)
losses.update(loss.item(), n=self.batch_size)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
desc = (start_pattern+" Loss: {:.4f} ({:.4f})").format(i+1, len_train, losses.val, losses.avg)
pb.set_description(desc)
if i % max(1, len_train//10) == 0:
self.logger.dump(desc)
if self.tb_on:
# Write to tensorboard
self.tb_writer.add_scalar("Train/running_loss", losses.val, self.train_step)
if show_imgs_on_tb:
t1 = self._denorm_image(to_array(t1.data[0]))
t2 = self._denorm_image(to_array(t2.data[0]))
self.tb_writer.add_image("Train/t1_picked", normalize_8bit(t1), self.train_step, dataformats='HWC')
self.tb_writer.add_image("Train/t2_picked", normalize_8bit(t2), self.train_step, dataformats='HWC')
self.tb_writer.add_image("Train/labels_picked", to_array(tar[0]), self.train_step, dataformats='HW')
for key, feats in out_dict.items():
for idx, feat in enumerate(feats):
feat = self._process_fetched_feat(feat)
self.tb_writer.add_image(f"Train/{key}_{idx}", feat, self.train_step)
self.tb_writer.flush()
self.train_step += 1
if self.tb_on:
self.tb_writer.add_scalar("Train/loss", losses.avg, self.train_step)
self.tb_writer.add_scalar("Train/lr", self.lr, self.train_step)
def evaluate_epoch(self, epoch):
self.logger.show_nl("Epoch: [{0}]".format(epoch))
losses = Meter()
len_eval = len(self.eval_loader)
width = len(str(len_eval))
start_pattern = "[{{:>{0}}}/{{:>{0}}}]".format(width)
pb = tqdm(self.eval_loader)
# Construct metrics
metrics = (Precision(mode='accum'), Recall(mode='accum'), F1Score(mode='accum'), Accuracy(mode='accum'))
self.model.eval()
with torch.no_grad():
for i, (name, t1, t2, tar) in enumerate(pb):
t1, t2, tar = self._prepare_data(t1, t2, tar)
fetch_dict = self._set_fetch_dict()
out_dict = FeatureContainer()
with HookHelper(self.model, fetch_dict, out_dict, hook_type='forward_out'):
out = self.model(t1, t2)
pred = self._process_model_out(out)
loss = self.criterion(pred, tar)
losses.update(loss.item())
# Convert to numpy arrays
prob = self._pred_to_prob(pred)
prob = to_array(prob[0])
cm = (prob>0.5).astype('uint8')
tar = to_array(tar[0]).astype('uint8')
for m in metrics:
m.update(cm, tar)
desc = (start_pattern+" Loss: {:.4f} ({:.4f})").format(i+1, len_eval, losses.val, losses.avg)
for m in metrics:
desc += " {} {:.4f}".format(m.__name__, m.val)
pb.set_description(desc)
dump = not self.is_training or (i % max(1, len_eval//10) == 0)
if dump:
self.logger.dump(desc)
if self.tb_on:
if dump:
t1 = self._denorm_image(to_array(t1[0]))
t2 = self._denorm_image(to_array(t2[0]))
self.tb_writer.add_image("Eval/t1", normalize_8bit(t1), self.eval_step, dataformats='HWC')
self.tb_writer.add_image("Eval/t2", normalize_8bit(t2), self.eval_step, dataformats='HWC')
self.tb_writer.add_image("Eval/labels", quantize(tar), self.eval_step, dataformats='HW')
self.tb_writer.add_image("Eval/prob", to_pseudo_color(quantize(prob)), self.eval_step, dataformats='HWC')
self.tb_writer.add_image("Eval/cm", quantize(cm), self.eval_step, dataformats='HW')
for key, feats in out_dict.items():
for idx, feat in enumerate(feats):
feat = self._process_fetched_feat(feat)
self.tb_writer.add_image(f"Train/{key}_{idx}", feat, self.eval_step)
self.eval_step += 1
if self.save:
self.save_image(name[0], quantize(cm), epoch)
if self.tb_on:
self.tb_writer.add_scalar("Eval/loss", losses.avg, self.eval_step)
for m in metrics:
self.tb_writer.add_scalar(f"Eval/{m.__name__.lower()}", m.val, self.eval_step)
self.tb_writer.flush()
return metrics[2].val # F1-score
def save_image(self, file_name, image, epoch):
file_path = osp.join(
'epoch_{}'.format(epoch),
self.out_dir,
file_name
)
out_path = self.path(
'out', file_path,
suffix=not self.ctx['suffix_off'],
auto_make=True,
underline=True
)
return io.imsave(out_path, image)
def _denorm_image(self, x):
return x*np.asarray(self.ctx['sigma']) + np.asarray(self.ctx['mu'])
def _process_fetched_feat(self, feat):
feat = normalize_minmax(feat.mean(1))
feat = quantize(to_array(feat[0]))
feat = to_pseudo_color(feat)
return feat
def _init_trainer(self):
pass
def _prepare_data(self, t1, t2, tar):
return t1.to(self.device), t2.to(self.device), tar.to(self.device)
def _set_fetch_dict(self):
return dict()
def _process_model_out(self, out):
return out
def _pred_to_prob(self, pred):
        return torch.sigmoid(pred) |
# Generated by Django 3.0.1 on 2020-02-18 02:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('page', '0007_auto_20200218_1030'),
]
operations = [
migrations.AlterField(
model_name='post',
name='cat',
field=models.CharField(blank=True, default='', max_length=200, verbose_name='網站類型'),
),
]
|
from django.apps import AppConfig
class PluginAppConfig(AppConfig):
name = "metagov.plugins.twitter"
label = "metagov_twitter" |
from pynet.models.densenet import DenseNet, F
from torch import nn
import torch, re, logging
from typing import List, Dict, Union
class FusionDenseNet(DenseNet):
def __init__(self, modalities: List[str], fusion_scheme: str="late", agg_scheme: str="concat", device="cpu",
*args, **kwargs):
"""
:param modalities
List of modality's name to fuse
:param fusion_scheme, str default None
If set, the network can perform modality fusion in either early-, mid- or late- fusion.
:param agg_scheme: str, default None
The aggregation scheme bewteen n intermediate layers (for n modalities)
:param device: str
CPU or CUDA device
:param args, kwargs: arguments defining DenseNet backbone for all modalities
"""
super(DenseNet, self).__init__() # init torch Module
assert agg_scheme in ["cat", "concat", "avg", "max"] # NB: cat == concat
assert device in ["cpu", "cuda"]
self.fusion_scheme = fusion_scheme
self.agg_scheme = agg_scheme
self.modalities = modalities
self.device = device
self.logger = logging.getLogger("pynet")
nb_mod = len(modalities)
if fusion_scheme == "early":
# simple: we only need to redefine the # input channels
in_channels = kwargs.pop("in_channels", 1) * nb_mod
super().__init__(*args, in_channels=in_channels, **kwargs)
else:
self.nets = []
out_block = kwargs.pop("out_block", None)
for i in range(nb_mod):
if fusion_scheme == "late":
# simplest case where we only aggregate network's output
assert agg_scheme != "concat", "No concatenation implemented for late-fusion"
net = DenseNet(*args, **kwargs)
self.logger.warning("Softmax is applied for late-fusion with DenseNet !")
if net.num_classes == 1:
if agg_scheme == "max":
raise RuntimeError("Number of classes must be >= 2")
net.classifier = nn.Sequential(net.classifier, nn.Sigmoid())
else:
net.classifier = nn.Sequential(net.classifier, nn.Softmax(dim=-1))
elif fusion_scheme[:3] == "mid":
# trickier, it needs to define only the network until a certain block
try:
block = int(re.search("mid-([1-4])$", fusion_scheme)[1])
except TypeError:
raise ValueError("Block must be between 1 and 4. Got %s"%fusion_scheme)
net = DenseNet(*args, out_block="block%i"%block, **kwargs)
else:
raise ValueError("Unknown fusion scheme: %s"%fusion_scheme)
self.nets.append(net)
self.nets = nn.ModuleList(self.nets)
if fusion_scheme[:3] == "mid":
# Starts by defining the entire network
self.head = DenseNet(*args, out_block=out_block, **kwargs)
# Removes the first blocks
base_modules = [name for (name, _) in self.nets[0].features.named_children()]
filtered_modules = [m for (name, m) in self.head.features.named_children() if name not in base_modules]
# Small hack: add a conv layer to reduce the nb of feature maps
if self.agg_scheme in ["concat", "cat"]:
filtered_modules = [nn.Conv3d(nb_mod*self.nets[0].num_features, self.nets[0].num_features,
kernel_size=1)] + filtered_modules
self.head.features = nn.Sequential(*filtered_modules)
def aggregate(self, x: List[torch.Tensor]):
if self.agg_scheme == "avg":
return torch.mean(torch.stack(x, dim=1), dim=1)
elif self.agg_scheme == "max":
if self.fusion_scheme == "late": # pick the distribution with max probability
raise NotImplementedError("To be continued")
# x = torch.stack(x, dim=1) # size n_batch x n_mod x n_classes
# assert len(x.shape) == 3, "Wrong output shape {}".format(x.shape)
# max_distrib, _ = torch.max(x, dim=-1)
# _, argmax = torch.max(max_distrib, dim=1)
# return x[:, argmax]
else:
return torch.max(torch.stack(x, dim=1), dim=1)[0]
elif self.agg_scheme in ["concat", "cat"]:
return torch.cat(x, dim=1)
raise ValueError("Unknown aggregation scheme: %s"%self.agg_scheme)
def forward(self, x: Dict[str, List[torch.Tensor]]):
assert set(x.keys()) == set(self.modalities)
x = [torch.stack(x[mod], dim=0).to(self.device) for mod in self.modalities]
if self.fusion_scheme == "early":
## Fuses the inputs as several channels
return super().forward(torch.cat(x, dim=1))
if self.fusion_scheme == "late":
## Fuses the output prediction of each sub-network
out = self.aggregate([self.nets[i](x_) for (i, x_) in enumerate(x)])
return out
if self.fusion_scheme[:3] == "mid":
## Fuses the intermediate feature maps of each sub-network.
## Gives the aggregated features maps to the head.
out = self.aggregate([self.nets[i].features(x_) for (i, x_) in enumerate(x)])
out = self.head(out)
return out
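# A hedged usage sketch (not from the original code base; the exact backbone kwargs
# depend on pynet's DenseNet signature, and "T1"/"T2" are placeholder modality names):
#
#   model = fusion_densenet121(["T1", "T2"], fusion_scheme="mid-3", agg_scheme="avg",
#                              device="cpu")
#   # forward() expects a dict mapping each modality name to a list of tensors,
#   # which is stacked along the batch dimension before fusion:
#   #   out = model({"T1": [t1_a, t1_b], "T2": [t2_a, t2_b]})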
class MultiModalDenseNet(nn.Module):
def __init__(self, modalities: List[str], *args, device: str='cuda', concatenate_out_mod: bool=False, **kwargs):
"""
:param modalities: listing all the modality names. It also defines the total # modalities.
:param concatenate_out_mod: bool, default False
whether we concatenate the ouput or not
:param args, kwargs: parameters to give to all DenseNet
"""
super(MultiModalDenseNet, self).__init__()
self.modalities = modalities
self.device = device
self.concatenate_out_mod = concatenate_out_mod
# No fusion, defines only individual network
for mod in self.modalities:
setattr(self, "densenet_" + mod, DenseNet(*args, **kwargs))
    def forward(self, x: Union[Dict[str, torch.Tensor], torch.Tensor], mod: str=None):
"""
:param x: input images
:param mod: str, modality used if no fusion scheme set
:return: torch.Tensor
"""
if mod is None:
out = []
if self.concatenate_out_mod:
for mod in self.modalities:
net = getattr(self, "densenet_"+mod)
out.append(net(torch.stack(x[mod]).to(self.device)).view(len(x[mod]), -1))
return torch.cat(out, dim=1)
else:
                assert len(self.modalities) == 1, "The modality needs to be specified"
net = getattr(self, "densenet_"+self.modalities[0])
else:
assert mod in self.modalities, "Unknown modality: %s"%mod
net = getattr(self, "densenet_"+mod)
return net(x)
def multimodal_densenet121(modalities, device='cpu', **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
"""
return MultiModalDenseNet(modalities, 32, (6, 12, 24, 16), 64, device=device, **kwargs)
def fusion_densenet121(mods, fusion_scheme: str="late", agg_scheme: str="concat", device="cpu", **kwargs):
return FusionDenseNet(mods, fusion_scheme, agg_scheme, device, 32, (6, 12, 24, 16), 64, **kwargs) |
from torch.utils.tensorboard import SummaryWriter
from base_config import BaseConfigByEpoch
from model_map import get_model_fn
from data.data_factory import create_dataset, load_cuda_data, num_iters_per_epoch
from torch.nn.modules.loss import CrossEntropyLoss
from utils.engine import Engine
from utils.pyt_utils import ensure_dir
from utils.misc import torch_accuracy, AvgMeter
from collections import OrderedDict
import torch
from tqdm import tqdm
import time
from builder import ConvBuilder
from utils.lr_scheduler import get_lr_scheduler
import os
from utils.checkpoint import get_last_checkpoint
from ndp_test import val_during_train
from sklearn.cluster import KMeans
import numpy as np
from csgd.csgd_prune import csgd_prune_and_save
TRAIN_SPEED_START = 0.1
TRAIN_SPEED_END = 0.2
COLLECT_TRAIN_LOSS_EPOCHS = 3
TEST_BATCH_SIZE = 100
KERNEL_KEYWORD = 'conv.weight'
def add_vecs_to_merge_mat_dicts(param_name_to_merge_matrix):
kernel_names = set(param_name_to_merge_matrix.keys())
for name in kernel_names:
bias_name = name.replace(KERNEL_KEYWORD, 'conv.bias')
gamma_name = name.replace(KERNEL_KEYWORD, 'bn.weight')
beta_name = name.replace(KERNEL_KEYWORD, 'bn.bias')
param_name_to_merge_matrix[bias_name] = param_name_to_merge_matrix[name]
param_name_to_merge_matrix[gamma_name] = param_name_to_merge_matrix[name]
param_name_to_merge_matrix[beta_name] = param_name_to_merge_matrix[name]
def generate_merge_matrix_for_kernel(deps, layer_idx_to_clusters, kernel_namedvalue_list):
result = {}
for layer_idx, clusters in layer_idx_to_clusters.items():
        # number of filters (channels) in this layer
        num_filters = deps[layer_idx]
        # build a num_filters x num_filters zero matrix
        merge_trans_mat = np.zeros((num_filters, num_filters), dtype=np.float32)
        # example: clustering 16 filters into 4 clusters gives [[1, 10, 11, 12, 14], [3, 6], [0, 4, 7, 8, 9, 13], [2, 5, 15]]
        for clst in clusters:
            # clst is then each of [1, 10, 11, 12, 14], [3, 6], [0, 4, 7, 8, 9, 13], [2, 5, 15] in turn
if len(clst) == 1:
merge_trans_mat[clst[0], clst[0]] = 1
continue
sc = sorted(clst) # Ideally, clst should have already been sorted in ascending order
for ei in sc:
for ej in sc:
merge_trans_mat[ei, ej] = 1 / len(clst)
result[kernel_namedvalue_list[layer_idx].name] = torch.from_numpy(merge_trans_mat).cuda()
    # every clustered layer thus gets a matrix encoding its cluster membership
    # this matrix is precomputed to speed up the gradient-merging step
return result
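# Worked example (illustrative, not from the original code): for a layer with 4
# filters clustered as [[0, 2], [1], [3]], the merge matrix built above is
#   [[0.5, 0.0, 0.5, 0.0],
#    [0.0, 1.0, 0.0, 0.0],
#    [0.5, 0.0, 0.5, 0.0],
#    [0.0, 0.0, 0.0, 1.0]]
# so merge_matrix @ grad replaces each filter's gradient with the average gradient
# of its cluster, which is what drives clustered filters toward identical values.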
# Recently it is popular to cancel weight decay on vecs
def generate_decay_matrix_for_kernel_and_vecs(deps, layer_idx_to_clusters, kernel_namedvalue_list,
weight_decay, weight_decay_bias, centri_strength):
result = {}
# for the kernel
for layer_idx, clusters in layer_idx_to_clusters.items():
num_filters = deps[layer_idx]
decay_trans_mat = np.zeros((num_filters, num_filters), dtype=np.float32)
for clst in clusters:
sc = sorted(clst)
for ee in sc:
decay_trans_mat[ee, ee] = weight_decay + centri_strength
for p in sc:
decay_trans_mat[ee, p] += -centri_strength / len(clst)
kernel_mat = torch.from_numpy(decay_trans_mat).cuda()
result[kernel_namedvalue_list[layer_idx].name] = kernel_mat
# for the vec params (bias, beta and gamma), we use 0.1 * centripetal strength
for layer_idx, clusters in layer_idx_to_clusters.items():
num_filters = deps[layer_idx]
decay_trans_mat = np.zeros((num_filters, num_filters), dtype=np.float32)
for clst in clusters:
sc = sorted(clst)
for ee in sc:
# Note: using smaller centripetal strength on the scaling factor of BN improve the performance in some of the cases
decay_trans_mat[ee, ee] = weight_decay_bias + centri_strength * 0.1
for p in sc:
decay_trans_mat[ee, p] += -centri_strength * 0.1 / len(clst)
vec_mat = torch.from_numpy(decay_trans_mat).cuda()
result[kernel_namedvalue_list[layer_idx].name.replace(KERNEL_KEYWORD, 'bn.weight')] = vec_mat
result[kernel_namedvalue_list[layer_idx].name.replace(KERNEL_KEYWORD, 'bn.bias')] = vec_mat
result[kernel_namedvalue_list[layer_idx].name.replace(KERNEL_KEYWORD, 'conv.bias')] = vec_mat
return result
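# In matrix form (restating the construction above), for the kernel decay matrix each
# row e belonging to a cluster c satisfies
#   D[e, e] = weight_decay + centri_strength - centri_strength / len(c)
#   D[e, p] = -centri_strength / len(c)   for the other members p of c
# so D @ W applies ordinary weight decay plus a centripetal pull of each kernel toward
# its cluster mean; the vec parameters (bias/gamma/beta) use weight_decay_bias and
# 0.1 * centri_strength instead.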
def cluster_by_kmeans(kernel_value, num_cluster):
    # example: clustering 16 filters into 4 clusters gives result = [[1, 10, 11, 12, 14], [3, 6], [0, 4, 7, 8, 9, 13], [2, 5, 15]]
assert kernel_value.ndim == 4 # n,c,h,w
x = np.reshape(kernel_value, (kernel_value.shape[0], -1)) # n, c*h*w
if num_cluster == x.shape[0]: # if num_cluster == n, result = [0, 1, ..., n]
result = [[i] for i in range(num_cluster)]
return result
else:
print('cluster {} filters into {} clusters'.format(x.shape[0], num_cluster))
km = KMeans(n_clusters=num_cluster) # use sklearn.cluster.KMeans to cluster kernel_value
km.fit(x)
result = [] # record result
for j in range(num_cluster):
result.append([])
for i, c in enumerate(km.labels_):
result[c].append(i)
for r in result:
assert len(r) > 0
return result
def _is_follower(layer_idx, pacesetter_dict):
followers_and_pacesetters = set(pacesetter_dict.keys())
return (layer_idx in followers_and_pacesetters) and (pacesetter_dict[layer_idx] != layer_idx)
def get_layer_idx_to_clusters(kernel_namedvalue_list, target_deps, pacesetter_dict):
    # returns a dict mapping each layer index to a clustering of that layer's filters, e.g. [[1, 10, 11, 12, 14], [3, 6], [0, 4, 7, 8, 9, 13], [2, 5, 15]]
result = {}
for layer_idx, named_kv in enumerate(kernel_namedvalue_list):
num_filters = named_kv.value.shape[0]
if pacesetter_dict is not None and _is_follower(layer_idx, pacesetter_dict):
continue
if num_filters > target_deps[layer_idx]:
result[layer_idx] = cluster_by_kmeans(kernel_value=named_kv.value, num_cluster=target_deps[layer_idx])
elif num_filters < target_deps[layer_idx]:
raise ValueError('wrong target dep')
return result
def train_one_step(net, data, label, optimizer, criterion, param_name_to_merge_matrix, param_name_to_decay_matrix):
pred = net(data)
loss = criterion(pred, label)
loss.backward()
    # above: the usual loss computation and backward pass
#TODO note: C-SGD works here
for name, param in net.named_parameters():
name = name.replace('module.', '')
if name in param_name_to_merge_matrix:
p_dim = param.dim()
p_size = param.size()
if p_dim == 4:
param_mat = param.reshape(p_size[0], -1)
g_mat = param.grad.reshape(p_size[0], -1)
elif p_dim == 1:
param_mat = param.reshape(p_size[0], 1)
g_mat = param.grad.reshape(p_size[0], 1)
else:
assert p_dim == 2
param_mat = param
g_mat = param.grad
            # above: fetch the current gradient and reshape it into g_mat
            # below: apply the C-SGD rule from the paper: merge-matrix @ gradient + decay-matrix @ parameter
            csgd_gradient = param_name_to_merge_matrix[name].matmul(g_mat) + param_name_to_decay_matrix[name].matmul(param_mat)
            # write the modified gradient back into param.grad
param.grad.copy_(csgd_gradient.reshape(p_size))
optimizer.step()
optimizer.zero_grad()
acc, acc5 = torch_accuracy(pred, label, (1,5))
return acc, acc5, loss
def sgd_optimizer(engine, cfg, model, no_l2_keywords, use_nesterov, keyword_to_lr_mult):
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = cfg.base_lr
weight_decay = cfg.weight_decay
if "bias" in key or "bn" in key or "BN" in key:
# lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
weight_decay = cfg.weight_decay_bias
engine.echo('set weight_decay_bias={} for {}'.format(weight_decay, key))
for kw in no_l2_keywords:
if kw in key:
weight_decay = 0
engine.echo('NOTICE! weight decay = 0 for {} because {} in {}'.format(key, kw, key))
break
if 'bias' in key:
apply_lr = 2 * lr
else:
apply_lr = lr
if keyword_to_lr_mult is not None:
for keyword, mult in keyword_to_lr_mult.items():
if keyword in key:
apply_lr *= mult
engine.echo('multiply lr of {} by {}'.format(key, mult))
break
params += [{"params": [value], "lr": apply_lr, "weight_decay": weight_decay}]
# optimizer = torch.optim.Adam(params, lr)
optimizer = torch.optim.SGD(params, lr, momentum=cfg.momentum, nesterov=use_nesterov)
return optimizer
def get_optimizer(engine, cfg, model, no_l2_keywords, use_nesterov=False, keyword_to_lr_mult=None):
return sgd_optimizer(engine, cfg, model, no_l2_keywords, use_nesterov=use_nesterov, keyword_to_lr_mult=keyword_to_lr_mult)
def get_criterion(cfg):
return CrossEntropyLoss()
# core C-SGD training routine
def csgd_train_main(
local_rank,
cfg:BaseConfigByEpoch, target_deps, succeeding_strategy, pacesetter_dict, centri_strength, pruned_weights,
net=None, train_dataloader=None, val_dataloader=None, show_variables=False, convbuilder=None,
init_hdf5=None, no_l2_keywords='depth', use_nesterov=False,
load_weights_keyword=None,
keyword_to_lr_mult=None,
auto_continue=False, save_hdf5_epochs=10000):
ensure_dir(cfg.output_dir)
ensure_dir(cfg.tb_dir)
clusters_save_path = os.path.join(cfg.output_dir, 'clusters.npy')
with Engine(local_rank=local_rank) as engine:
engine.setup_log(
name='train', log_dir=cfg.output_dir, file_name='log.txt')
# ----------------------------- build model ------------------------------
if convbuilder is None:
convbuilder = ConvBuilder(base_config=cfg)
if net is None:
net_fn = get_model_fn(cfg.dataset_name, cfg.network_type)
model = net_fn(cfg, convbuilder)
else:
model = net
model = model.cuda()
# ----------------------------- model done ------------------------------
# ---------------------------- prepare data -------------------------
if train_dataloader is None:
train_data = create_dataset(cfg.dataset_name, cfg.dataset_subset,
cfg.global_batch_size, distributed=engine.distributed)
if cfg.val_epoch_period > 0 and val_dataloader is None:
val_data = create_dataset(cfg.dataset_name, 'val',
global_batch_size=100, distributed=False)
engine.echo('NOTE: Data prepared')
engine.echo('NOTE: We have global_batch_size={} on {} GPUs, the allocated GPU memory is {}'.format(cfg.global_batch_size, torch.cuda.device_count(), torch.cuda.memory_allocated()))
# ----------------------------- data done --------------------------------
        # ------------------------ prepare optimizer, scheduler, criterion -------
if no_l2_keywords is None:
no_l2_keywords = []
if type(no_l2_keywords) is not list:
no_l2_keywords = [no_l2_keywords]
# For a target parameter, cancel its weight decay in optimizer, because the weight decay will be later encoded in the decay mat
conv_idx = 0
for k, v in model.named_parameters():
if v.dim() != 4:
continue
            print('prune layer {} from {} to {} filters'.format(conv_idx, cfg.deps[conv_idx], target_deps[conv_idx]))
if target_deps[conv_idx] < cfg.deps[conv_idx]:
no_l2_keywords.append(k.replace(KERNEL_KEYWORD, 'conv'))
no_l2_keywords.append(k.replace(KERNEL_KEYWORD, 'bn'))
conv_idx += 1
print('no l2: ', no_l2_keywords)
optimizer = get_optimizer(engine, cfg, model, no_l2_keywords=no_l2_keywords, use_nesterov=use_nesterov, keyword_to_lr_mult=keyword_to_lr_mult)
scheduler = get_lr_scheduler(cfg, optimizer)
criterion = get_criterion(cfg).cuda()
# --------------------------------- done -------------------------------
engine.register_state(
scheduler=scheduler, model=model, optimizer=optimizer)
if engine.distributed:
torch.cuda.set_device(local_rank)
engine.echo('Distributed training, device {}'.format(local_rank))
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank],
broadcast_buffers=False, )
else:
assert torch.cuda.device_count() == 1
engine.echo('Single GPU training')
if cfg.init_weights:
engine.load_checkpoint(cfg.init_weights)
if init_hdf5:
engine.load_hdf5(init_hdf5, load_weights_keyword=load_weights_keyword)
if auto_continue:
assert cfg.init_weights is None
engine.load_checkpoint(get_last_checkpoint(cfg.output_dir))
if show_variables:
engine.show_variables()
# ===================================== prepare the clusters and matrices for C-SGD ==========
kernel_namedvalue_list = engine.get_all_conv_kernel_namedvalue_as_list()
if os.path.exists(clusters_save_path):
layer_idx_to_clusters = np.load(clusters_save_path, allow_pickle=True).item()
else:
if local_rank == 0:
                # compute the per-layer clustering
                # returns a dict: key = layer idx, value = clustering of that layer's filters, e.g. [[1, 10, 11, 12, 14], [3, 6], [0, 4, 7, 8, 9, 13], [2, 5, 15]]
layer_idx_to_clusters = get_layer_idx_to_clusters(kernel_namedvalue_list=kernel_namedvalue_list,
target_deps=target_deps,
pacesetter_dict=pacesetter_dict)
                # pacesetter_dict describes which layers are tied together by residual (shortcut) connections
if pacesetter_dict is not None:
for follower_idx, pacesetter_idx in pacesetter_dict.items():
                        # a follower layer in a residual block simply reuses the clustering of its pacesetter layer
if pacesetter_idx in layer_idx_to_clusters:
layer_idx_to_clusters[follower_idx] = layer_idx_to_clusters[pacesetter_idx]
                # save the clustering result
np.save(clusters_save_path, layer_idx_to_clusters)
else:
while not os.path.exists(clusters_save_path):
time.sleep(10)
print('sleep, waiting for process 0 to calculate clusters')
layer_idx_to_clusters = np.load(clusters_save_path, allow_pickle=True).item()
param_name_to_merge_matrix = generate_merge_matrix_for_kernel(deps=cfg.deps,
layer_idx_to_clusters=layer_idx_to_clusters,
kernel_namedvalue_list=kernel_namedvalue_list)
        # also register each layer's bias/gamma/beta under the same merge matrix in param_name_to_merge_matrix
add_vecs_to_merge_mat_dicts(param_name_to_merge_matrix)
        # core code: build the decay matrices derived from the clustering, acting as the new (cluster-aware) weight decay
param_name_to_decay_matrix = generate_decay_matrix_for_kernel_and_vecs(deps=cfg.deps,
layer_idx_to_clusters=layer_idx_to_clusters,
kernel_namedvalue_list=kernel_namedvalue_list,
weight_decay=cfg.weight_decay,
weight_decay_bias=cfg.weight_decay_bias,
centri_strength=centri_strength)
print(param_name_to_decay_matrix.keys())
print(param_name_to_merge_matrix.keys())
conv_idx = 0
        param_to_clusters = {}  # build param_to_clusters from layer_idx_to_clusters
for k, v in model.named_parameters():
if v.dim() != 4:
continue
if conv_idx in layer_idx_to_clusters:
for clsts in layer_idx_to_clusters[conv_idx]:
if len(clsts) > 1:
param_to_clusters[v] = layer_idx_to_clusters[conv_idx]
break
conv_idx += 1
# ============================================================================================
# ------------ do training ---------------------------- #
engine.log("\n\nStart training with pytorch version {}".format(torch.__version__))
iteration = engine.state.iteration
iters_per_epoch = num_iters_per_epoch(cfg)
max_iters = iters_per_epoch * cfg.max_epochs
tb_writer = SummaryWriter(cfg.tb_dir)
tb_tags = ['Top1-Acc', 'Top5-Acc', 'Loss']
model.train()
done_epochs = iteration // iters_per_epoch
last_epoch_done_iters = iteration % iters_per_epoch
if done_epochs == 0 and last_epoch_done_iters == 0:
engine.save_hdf5(os.path.join(cfg.output_dir, 'init.hdf5'))
recorded_train_time = 0
recorded_train_examples = 0
collected_train_loss_sum = 0
collected_train_loss_count = 0
for epoch in range(done_epochs, cfg.max_epochs):
if engine.distributed and hasattr(train_data, 'train_sampler'):
train_data.train_sampler.set_epoch(epoch)
if epoch == done_epochs:
pbar = tqdm(range(iters_per_epoch - last_epoch_done_iters))
else:
pbar = tqdm(range(iters_per_epoch))
if epoch == 0 and local_rank == 0:
val_during_train(epoch=epoch, iteration=iteration, tb_tags=tb_tags, engine=engine, model=model,
val_data=val_data, criterion=criterion, descrip_str='Init',
dataset_name=cfg.dataset_name, test_batch_size=TEST_BATCH_SIZE,
tb_writer=tb_writer)
top1 = AvgMeter()
top5 = AvgMeter()
losses = AvgMeter()
discrip_str = 'Epoch-{}/{}'.format(epoch, cfg.max_epochs)
pbar.set_description('Train' + discrip_str)
for _ in pbar:
start_time = time.time()
data, label = load_cuda_data(train_data, dataset_name=cfg.dataset_name)
# load_cuda_data(train_dataloader, cfg.dataset_name)
data_time = time.time() - start_time
train_net_time_start = time.time()
acc, acc5, loss = train_one_step(model, data, label, optimizer, criterion,
param_name_to_merge_matrix=param_name_to_merge_matrix,
param_name_to_decay_matrix=param_name_to_decay_matrix)
train_net_time_end = time.time()
if iteration > TRAIN_SPEED_START * max_iters and iteration < TRAIN_SPEED_END * max_iters:
recorded_train_examples += cfg.global_batch_size
recorded_train_time += train_net_time_end - train_net_time_start
scheduler.step()
for module in model.modules():
if hasattr(module, 'set_cur_iter'):
module.set_cur_iter(iteration)
if iteration % cfg.tb_iter_period == 0 and engine.world_rank == 0:
for tag, value in zip(tb_tags, [acc.item(), acc5.item(), loss.item()]):
tb_writer.add_scalars(tag, {'Train': value}, iteration)
deviation_sum = 0
for param, clusters in param_to_clusters.items():
pvalue = param.detach().cpu().numpy()
for cl in clusters:
if len(cl) == 1:
continue
selected = pvalue[cl, :, :, :]
mean_kernel = np.mean(selected, axis=0, keepdims=True)
diff = selected - mean_kernel
deviation_sum += np.sum(diff ** 2)
tb_writer.add_scalars('deviation_sum', {'Train': deviation_sum}, iteration)
top1.update(acc.item())
top5.update(acc5.item())
losses.update(loss.item())
if epoch >= cfg.max_epochs - COLLECT_TRAIN_LOSS_EPOCHS:
collected_train_loss_sum += loss.item()
collected_train_loss_count += 1
pbar_dic = OrderedDict()
pbar_dic['data-time'] = '{:.2f}'.format(data_time)
pbar_dic['cur_iter'] = iteration
pbar_dic['lr'] = scheduler.get_lr()[0]
pbar_dic['top1'] = '{:.5f}'.format(top1.mean)
pbar_dic['top5'] = '{:.5f}'.format(top5.mean)
pbar_dic['loss'] = '{:.5f}'.format(losses.mean)
pbar.set_postfix(pbar_dic)
iteration += 1
if iteration >= max_iters or iteration % cfg.ckpt_iter_period == 0:
engine.update_iteration(iteration)
if (not engine.distributed) or (engine.distributed and engine.world_rank == 0):
engine.save_and_link_checkpoint(cfg.output_dir)
if iteration >= max_iters:
break
# do something after an epoch?
engine.update_iteration(iteration)
engine.save_latest_ckpt(cfg.output_dir)
if (epoch + 1) % save_hdf5_epochs == 0:
engine.save_hdf5(os.path.join(cfg.output_dir, 'epoch-{}.hdf5'.format(epoch)))
if local_rank == 0 and \
cfg.val_epoch_period > 0 and (epoch >= cfg.max_epochs - 10 or epoch % cfg.val_epoch_period == 0):
val_during_train(epoch=epoch, iteration=iteration, tb_tags=tb_tags, engine=engine, model=model,
val_data=val_data, criterion=criterion, descrip_str=discrip_str,
dataset_name=cfg.dataset_name, test_batch_size=TEST_BATCH_SIZE, tb_writer=tb_writer)
if iteration >= max_iters:
break
# do something after the training
if recorded_train_time > 0:
exp_per_sec = recorded_train_examples / recorded_train_time
else:
exp_per_sec = 0
engine.log(
'TRAIN speed: from {} to {} iterations, batch_size={}, examples={}, total_net_time={:.4f}, examples/sec={}'
.format(int(TRAIN_SPEED_START * max_iters), int(TRAIN_SPEED_END * max_iters), cfg.global_batch_size,
recorded_train_examples, recorded_train_time, exp_per_sec))
if cfg.save_weights:
engine.save_checkpoint(cfg.save_weights)
print('NOTE: training finished, saved to {}'.format(cfg.save_weights))
engine.save_hdf5(os.path.join(cfg.output_dir, 'finish.hdf5'))
if collected_train_loss_count > 0:
engine.log('TRAIN LOSS collected over last {} epochs: {:.6f}'.format(COLLECT_TRAIN_LOSS_EPOCHS,
collected_train_loss_sum / collected_train_loss_count))
if local_rank == 0:
csgd_prune_and_save(engine=engine, layer_idx_to_clusters=layer_idx_to_clusters,
save_file=pruned_weights, succeeding_strategy=succeeding_strategy, new_deps=target_deps) |
import numpy as np
import math
import random
import skimage
import skimage.transform
class PreprocessImage():
def __init__(self, config):
"""config: COCO style configuration object for the Mask RCNN.
"""
self._config = config
def compose_image_meta(self, image_id, original_image_shape, image_shape,
window, scale, active_class_ids):
"""Returns input image metadata
"""
meta = np.array(
[image_id] +
list(original_image_shape) +
list(image_shape) +
list(window) +
[scale] +
list(active_class_ids)
)
return meta
def mold_image(self, images):
"""Substracts mean pixel from the image and casts dtype to float32.
"""
return images.astype(np.float32) - self._config.MEAN_PIXEL
def compute_backbone_shapes(self, image_shape):
"""Computes Mask RCNN backbone shapes
"""
if callable(self._config.BACKBONE):
return self._config.COMPUTE_BACKBONE_SHAPE(image_shape)
assert self._config.BACKBONE in ["resnet50", "resnet101"]
return np.array(
[[int(math.ceil(image_shape[0] / stride)),
int(math.ceil(image_shape[1] / stride))]
for stride in self._config.BACKBONE_STRIDES])
def generate_anchors(self, scales, ratios, shape, feature_stride, anchor_stride):
"""
scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]
ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
shape: [height, width] spatial shape of the feature map over which
to generate anchors.
feature_stride: Stride of the feature map relative to the image in pixels.
anchor_stride: Stride of anchors on the feature map. For example, if the
value is 2 then generate anchors for every other feature map pixel.
"""
# Get all combinations of scales and ratios
scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))
scales = scales.flatten()
ratios = ratios.flatten()
# Enumerate heights and widths from scales and ratios
heights = scales / np.sqrt(ratios)
widths = scales * np.sqrt(ratios)
# Enumerate shifts in feature space
shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride
shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride
shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = np.meshgrid(widths, shifts_x)
box_heights, box_centers_y = np.meshgrid(heights, shifts_y)
# Reshape to get a list of (y, x) and a list of (h, w)
box_centers = np.stack(
[box_centers_y, box_centers_x], axis=2).reshape([-1, 2])
box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])
# Convert to corner coordinates (y1, x1, y2, x2)
boxes = np.concatenate([box_centers - 0.5 * box_sizes,
box_centers + 0.5 * box_sizes], axis=1)
return boxes
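        # Illustrative count (not from the original code): scales=[32], ratios=[0.5, 1, 2],
        # shape=(2, 2), feature_stride=16, anchor_stride=1 yields 2*2 positions x 3
        # scale/ratio combinations = 12 anchors, each as (y1, x1, y2, x2) in pixels.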
def norm_boxes(self, boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [N, (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[N, (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = shape
scale = np.array([h - 1, w - 1, h - 1, w - 1])
shift = np.array([0, 0, 1, 1])
return np.divide((boxes - shift), scale).astype(np.float32)
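        # Worked example (illustrative): with shape=(1024, 1024), the pixel box
        # (0, 0, 1024, 1024) becomes (0, 0, 1023, 1023) / 1023 = (0.0, 0.0, 1.0, 1.0),
        # i.e. the exclusive pixel corner maps to an inclusive normalized corner.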
def generate_pyramid_anchors(self, scales, ratios, feature_shapes, feature_strides,
anchor_stride):
"""Generate anchors at different levels of a feature pyramid. Each scale
is associated with a level of the pyramid, but each ratio is used in
all levels of the pyramid.
Returns:
anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted
with the same order of the given scales. So, anchors of scale[0] come
first, then anchors of scale[1], and so on.
"""
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
anchors = []
for i in range(len(scales)):
anchors.append(self.generate_anchors(scales[i], ratios, feature_shapes[i],
feature_strides[i], anchor_stride))
return np.concatenate(anchors, axis=0)
def get_anchors(self,image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = self.compute_backbone_shapes(image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = self.generate_pyramid_anchors(
self._config.RPN_ANCHOR_SCALES,
self._config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self._config.BACKBONE_STRIDES,
self._config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
# TODO: Remove this after the notebook are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = self.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
def resize_image(self, image, min_dim=None, max_dim=None, min_scale=None, mode="square"):
"""Resizes an image keeping the aspect ratio unchanged.
min_dim: if provided, resizes the image such that it's smaller
dimension == min_dim
max_dim: if provided, ensures that the image longest side doesn't
exceed this value.
min_scale: if provided, ensure that the image is scaled up by at least
this percent even if min_dim doesn't require it.
mode: Resizing mode.
none: No resizing. Return the image unchanged.
square: Resize and pad with zeros to get a square image
of size [max_dim, max_dim].
pad64: Pads width and height with zeros to make them multiples of 64.
If min_dim or min_scale are provided, it scales the image up
before padding. max_dim is ignored in this mode.
The multiple of 64 is needed to ensure smooth scaling of feature
maps up and down the 6 levels of the FPN pyramid (2**6=64).
crop: Picks random crops from the image. First, scales the image based
on min_dim and min_scale, then picks a random crop of
size min_dim x min_dim. Can be used in training only.
max_dim is not used in this mode.
Returns:
image: the resized image
window: (y1, x1, y2, x2). If max_dim is provided, padding might
be inserted in the returned image. If so, this window is the
coordinates of the image part of the full image (excluding
the padding). The x2, y2 pixels are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
"""
# Keep track of image dtype and return results in the same dtype
image_dtype = image.dtype
# Default window (y1, x1, y2, x2) and default scale == 1.
h, w = image.shape[:2]
window = (0, 0, h, w)
scale = 1
padding = [(0, 0), (0, 0), (0, 0)]
crop = None
if mode == "none":
return image, window, scale, padding, crop
# Scale?
if min_dim:
# Scale up but not down
scale = max(1, min_dim / min(h, w))
if min_scale and scale < min_scale:
scale = min_scale
# Does it exceed max dim?
if max_dim and mode == "square":
image_max = max(h, w)
if round(image_max * scale) > max_dim:
scale = max_dim / image_max
# Resize image using bilinear interpolation
if scale != 1:
image = skimage.transform.resize(image, (round(h * scale), round(w * scale)),
preserve_range=True)
# Need padding or cropping?
if mode == "square":
# Get new height and width
h, w = image.shape[:2]
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "pad64":
h, w = image.shape[:2]
# Both sides must be divisible by 64
assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64"
# Height
if h % 64 > 0:
max_h = h - (h % 64) + 64
top_pad = (max_h - h) // 2
bottom_pad = max_h - h - top_pad
else:
top_pad = bottom_pad = 0
# Width
if w % 64 > 0:
max_w = w - (w % 64) + 64
left_pad = (max_w - w) // 2
right_pad = max_w - w - left_pad
else:
left_pad = right_pad = 0
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "crop":
# Pick a random crop
h, w = image.shape[:2]
y = random.randint(0, (h - min_dim))
x = random.randint(0, (w - min_dim))
crop = (y, x, min_dim, min_dim)
image = image[y:y + min_dim, x:x + min_dim]
window = (0, 0, min_dim, min_dim)
else:
raise Exception("Mode {} not supported".format(mode))
return image.astype(image_dtype), window, scale, padding, crop
def preprocess_input(self, img):
"""Pre-processes the input image.
img: Input image of shape (-1,XX,YY,3)
Returns:
molded_image: Molded image to be used as model input
image_meta: Input image metadata
anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted
with the same order of the given scales. So, anchors of scale[0] come
first, then anchors of scale[1], and so on.
window: (y1, x1, y2, x2). If max_dim is provided, padding might
be inserted in the returned image. If so, this window is the
coordinates of the image part of the full image (excluding
the padding). The x2, y2 pixels are not included.
"""
molded_image, window, scale, padding, crop = self.resize_image(
img,
min_dim=self._config.IMAGE_MIN_DIM,
min_scale=self._config.IMAGE_MIN_SCALE,
max_dim=self._config.IMAGE_MAX_DIM,
mode=self._config.IMAGE_RESIZE_MODE
)
molded_image = self.mold_image(molded_image)
image_meta = self.compose_image_meta(
0, img.shape, molded_image.shape, window, scale,
np.zeros([self._config.NUM_CLASSES], dtype=np.int32)
)
anchors = self.get_anchors(molded_image.shape)
#anchors = np.broadcast_to(anchors, (1,) + anchors.shape)
return molded_image, image_meta, anchors, window
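# A hedged usage sketch (assumes a Matterport Mask R-CNN style config object with the
# attributes referenced above, e.g. MEAN_PIXEL, IMAGE_MIN_DIM, RPN_ANCHOR_SCALES):
#
#   pre = PreprocessImage(config)
#   molded, meta, anchors, window = pre.preprocess_input(image)  # image: HxWx3 array
#   # `molded` is the mean-subtracted float32 input, `anchors` are normalized
#   # (y1, x1, y2, x2) boxes, and `window` locates the image inside any padding.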
|
import numpy as np
import mayavi.mlab as mlab
from desicos.conecylDB.read_write import xyz2thetazimp
R = 406.4 # nominal radius
H = 1219.2 # nominal height
R_best_fit = 407.193 # radius obtained with the best-fit routine
thetas, zfit, dR = np.genfromtxt('sample_theta_z_imp.txt', unpack=True)
xpos = R_best_fit*np.cos(thetas)
ypos = R_best_fit*np.sin(thetas)
sf = 20
x2 = xpos + sf*dR*np.cos(thetas)
y2 = ypos + sf*dR*np.sin(thetas)
z2 = zfit
Tinv = np.loadtxt('tmp_Tinv.txt')
x, y, z = Tinv.dot(np.vstack((x2, y2, z2, np.ones_like(x2))))
black = (0,0,0)
white = (1,1,1)
mlab.figure(bgcolor=white)
mlab.points3d(x, y, z, color=(0,1,0))
mlab.plot3d([0, 600], [0, 0], [0, 0], color=black, tube_radius=10.)
mlab.plot3d([0, 0], [0, 1600], [0, 0], color=black, tube_radius=10.)
mlab.plot3d([0, 0], [0, 0], [0, 600], color=black, tube_radius=10.)
mlab.text3d(650, -50, +50, 'x', color=black, scale=100.)
mlab.text3d(0, 1650, +50, 'y', color=black, scale=100.)
mlab.text3d(0, -50, 650, 'z', color=black, scale=100.)
mlab.savefig('plot_sample_3d.png', size=(500, 500))
|
# Making use of this to make a generic enabled file
# to load base adjutant-ui content.
FEATURE = "adjutant-ui-base"
# A list of applications to be added to INSTALLED_APPS.
ADD_INSTALLED_APPS = [
'adjutant_ui',
]
|
import socket
REMOTE_SERVER = "one.one.one.one"
def is_connected():
hostname = REMOTE_SERVER
try:
# see if we can resolve the host name -- tells us if there is
# a DNS listening
host = socket.gethostbyname(hostname)
# connect to the host -- tells us if the host is actually
# reachable
s = socket.create_connection((host, 80), 2)
s.close()
return True
    except OSError:
        pass
    return False
# is_connected()
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
using build-in/factory icon
Tested environment:
Mac OS X 10.6.8
Install Oxygen icon on Mac OS X via MacPorts:
sudo port install oxygen-icons
http://doc.trolltech.com/latest/qicon.html
http://www.pyside.org/docs/pyside/PySide/QtGui/QIcon.html
"""
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
import qutils
class Demo(QtGui.QWidget):
def __init__(self):
super(Demo, self).__init__()
x, y, w, h = 500, 200, 300, 400
self.setGeometry(x, y, w, h)
# NOTICE: the difference
print "themeName:", QtGui.QIcon.themeName()
print "hasThemeIcon:", QtGui.QIcon.hasThemeIcon("edit-undo")
qutils.config_theme_path()
print "themeName:", QtGui.QIcon.themeName()
print "hasThemeIcon:", QtGui.QIcon.hasThemeIcon("edit-undo")
print
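        # fromTheme() falls back to the explicitly supplied icon when the theme
        # does not provide "user-online".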
my_online = QtGui.QIcon("/path/to/my_online.png")
icon = QtGui.QIcon.fromTheme("user-online", my_online)
print "icon not found:", icon.isNull()
print "availableSizes:", icon.availableSizes()
lab = QtGui.QLabel('foo', self)
pixmap = icon.pixmap(QtCore.QSize(32, 32), QtGui.QIcon.Normal, QtGui.QIcon.On)
lab.setPixmap(pixmap)
lab.move(10, 10)
def show_and_raise(self):
self.show()
self.raise_()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
demo = Demo()
demo.show_and_raise()
sys.exit(app.exec_())
|
# coding: utf-8
# # Interpolate wavelength on multiple dimensions
#
# Jason Neal - 19th July 2017
# To try and interpolate N-D data along the first axis.
#
# This is to be able to perform chi-square analysis for many parameters.
# In[ ]:
import numpy as np
import scipy as sp
import scipy.interpolate  # make sp.interpolate available for interp1d below
from scipy.stats import chisquare
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
# The model we have is obs (w, f), and model (wm, fm).
#
# The combined model is (x + (y*a)*v) * gamma, i.e. two Doppler shifts, v and gamma.
#
# We either need to be able to perform broadcasting inside Pyastronomy.dopplershift, or do it ourselves and interpolate.
#
# In[ ]:
w = np.arange(20)
A = 1.7
S = 1.1
f = A * np.sin(w) + S
plt.plot(w, f, label="data")
plt.legend()
plt.show()
# In[ ]:
wm = np.linspace(-3,23, 50)
fm = np.sin(wm)
plt.plot(wm, fm, label="model")
plt.plot(w, f, label="data")
plt.show()
# # Add a second axis for the amplitude
# In[ ]:
a = np.arange(1.3, 2, 0.05)
print(a)
a.shape
# In[ ]:
fma = fm[:, None] * a  # Each column is the model scaled by a different amplitude
fma.shape
# In[ ]:
# make wavelength axis also the same
wma = wm[:, None] * np.ones_like(a)
wma.shape
# In[ ]:
# Need to interpolate fma from wma to w
# np.interp does not work on 2d.
w_func = sp.interpolate.interp1d(wm, fma, axis=0, kind="slinear")
# In[ ]:
fma_interp = w_func(w)
#fma_cube = w_func(w)
#fma_spl = w_func(w)
fma_interp.shape
# In[ ]:
plt.plot(w, fma_interp)
plt.plot(w, f, "--", label="data")
plt.legend()
plt.show()
# In[ ]:
chi2 = np.sum((f[:, None] - fma_interp)**2 / fma_interp, axis=0)
plt.plot(a, chi2, label="chi2")
plt.legend()
plt.show()
# In[ ]:
# Find the minimum value
m = np.argmin(chi2)
a_min = a[m]
a_min
# # Add a third axis for a vertical shift
#
# In[ ]:
shift = np.arange(0.1, 1.3, 0.1)
print(len(shift))
fmas = fma[:, :, None] + shift
fmas.shape
# In[ ]:
wmas = wma[:, :, None] * np.ones_like(shift)
wmas.shape
# In[ ]:
print(wm.shape)
print(fmas.shape)
w_sfunc = sp.interpolate.interp1d(wm, fmas, axis=0, kind="slinear")
fmas_interp = w_sfunc(w)
fmas_interp.shape
# In[ ]:
plt.plot(w, fmas_interp[:,3, :])
plt.plot(w, f, "--", label="data")
plt.legend()
plt.show()
# In[ ]:
chi2s = np.sum((f[:, None, None] - fmas_interp)**2 / fmas_interp, axis=0)
plt.plot(a, chi2s, label="chi2")
plt.legend()
plt.show()
# In[ ]:
X, Y = np.meshgrid(shift, a)
print(X.shape)
plt.contourf(X, Y, chi2s)
plt.colorbar()
plt.plot()
plt.show()
chi2s.shape
# In[ ]:
c2min = chi2s.argmin()
print(c2min)
chi2s[np.unravel_index(c2min, chi2s.shape)]
# In[ ]:
np.unravel_index(976, (140, 7))
# In[ ]:
plt.contour(chi2s)
plt.show()
# In[ ]:
# # Interpolating different wavelength axis.
#
# Each wl dimension has a dopplershift added.
#
# In[ ]:
c = 500
vc = (1 + np.arange(10) / c)
print(wm.shape)
print(vc.shape)
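# Broadcasting: each column of `doppler` is the model wavelength grid wm scaled
# by one of the velocity factors in vc.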
doppler = wm[:, np.newaxis] * vc
print(doppler.shape)
#print(doppler)
# In[ ]:
plt.plot(doppler, fmas[:,:,5])
plt.show()
# In[ ]:
# doppler_interp = sp.interpolate.interp1d(doppler, fm)
print(len(wm))
print(len(vc))
print(fma.shape) # fma includes the amplitude also.
# Cannot interpolate directly for all the different wavelengths at once. Therefore loop over the velocities.
dims = fmas.shape + (len(vc),)  # add an extra axis for the velocity dimension
print(dims)
result = np.empty(dims)
print(result.shape)
for i, v in enumerate(vc):
wm_vc = wm * v
# print(wm_vc.shape)
# print(fma.shape)
func = sp.interpolate.interp1d(wm_vc, fmas, axis=0, bounds_error=False, fill_value=99999)
# print(func(wm))
result[:,:, :, i] = func(wm)
# In[ ]:
print(result)
# # This lets me doppler shift the wavelength and return it to wm.
#
# In the second case for model I will just want to return it to the wavelength values of the observation.
# In[ ]:
# interp to obs
func = sp.interpolate.interp1d(wm, result, axis=0, bounds_error=False, fill_value=np.nan)
fmasd = func(w)
chi2d = np.sum((f[:, None, None, np.newaxis] - fmasd)**2 / fmasd, axis=0)
chi2d
# In[ ]:
chi2d.shape
# In[ ]:
fmasd.shape
# In[ ]:
a.shape
# In[ ]:
# Try a 3d chisquare
x_2 = chisquare(f[:, np.newaxis, np.newaxis, np.newaxis], fmasd, axis=0).statistic
x_2.argmin()
vals = np.unravel_index(x_2.argmin(), x_2.shape)
print("index of min = ", vals) # returns the minimum index location
# This provides a framework for chi-square analysis of large arrays for my simulations.
# In[ ]:
plt.title("shift min")
plt.contourf(x_2[:,3,:])
plt.show()
plt.contourf(x_2[4,:,:])
plt.title("amplitude min")
plt.show()
plt.contourf(x_2[:,:,4])
plt.title("doppler min")
plt.show()
# Currently these plots do not look very informative. I will need to remove the bad interpolation values also.
# In[ ]:
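# A possible follow-up (sketch): mask out the bad interpolation fill values
# (the 99999 sentinel and the NaNs) before computing chi-square, so that they
# do not dominate the statistic.
bad = ~np.isfinite(fmasd) | (fmasd >= 9e4)
fmasd_masked = np.where(bad, np.nan, fmasd)
x_2_masked = np.nansum((f[:, None, None, None] - fmasd_masked)**2 / fmasd_masked, axis=0)
x_2_masked.shape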
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 16 10:58:24 2018
@author: Ramsey
"""
import matplotlib.pyplot as plt
import numpy as np
def x(xo, param, iters, plt):
    # Burn-in: iterate the map xo -> xo*xo + param so transients die out
    # before anything is plotted.
    for i in range(iters):
        xo = xo*xo + param
    pts = []
    # Iterate the same map again, this time recording and plotting the orbit.
    for i in range(iters):
        xo = xo*xo + param
        pts.append(xo)
        plt.scatter(param, xo, s=1)
    return pts
iters = 100
stored = 100
param_low = -2
param_high = 0.25
totalsteps = 10.0
step = (param_high - param_low) / totalsteps
print("totalsteps=", totalsteps)
i = param_low
while i < param_high:
print("i is now:", i)
pts = x(1, i, iters, plt)
i = i + step
plt.show()
|
# compare the observed species richnesses to the 95 percentiles of the neutral model
import pandas as pd
import numpy as np
# parameters
# ---
# which fit should we use for the parameter values?
rho = 1700
subset_name = 'survey_only'
# where to put results
dir_results = '../../results/neutral_data/'
# where is processed information about islands?
dir_processed = '../../data/processed/'
# get area and real richness of each island
# ---
# get island name to richness
fname_islands = dir_processed + 'island_subsets/island_bird_presence_absence_' + subset_name + '.csv'
df_pamatrix = pd.read_csv(fname_islands)
island_names = list(df_pamatrix.columns[1:]) # fixed ordering of island names
richness = [ sum(df_pamatrix[island_name]) for island_name in island_names ]
# get island name to area
fname_area = dir_processed + 'island_area.csv'
df_area = pd.read_csv(fname_area)
df_area.set_index('island_name', inplace=True)
areas = [ df_area.loc[island_name]['area_sq_km'] for island_name in island_names ]
# get the species richness of each model sample
# ---
fname = dir_results + 'samples_' + subset_name + '_rho' + str(rho) + '.csv'
df = pd.read_csv(fname)
# calculate the mean and percentiles of richness for each island
# ---
# create means, hi, and lo
mean_sampled_richness = [ np.mean(df['no_spp_isle_' + island_name]) for island_name in island_names ]
hi_sampled_richness = [ np.percentile(df['no_spp_isle_' + island_name], 97.5) for island_name in island_names ]
lo_sampled_richness = [ np.percentile(df['no_spp_isle_' + island_name], 2.5) for island_name in island_names ]
# check if true is within percentiles
# ---
check_within = [ 'yes' if S >= lo and S <= hi else 'no' for S, lo, hi in zip(richness, lo_sampled_richness, hi_sampled_richness) ]
# save information to file
# ---
# put it into a dataframe
df_out = pd.DataFrame(zip( island_names, areas, richness, check_within, mean_sampled_richness, lo_sampled_richness, hi_sampled_richness ),
columns=['island_name', 'area_sq_km', 'S', 'true_S_in_bounds?', 'model_mean_S', 'model_lo_S', 'model_hi_S'])
df_out.to_csv(dir_results + 'percentiles_richness_' + subset_name + '.csv', index=False)
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
# acting on behalf of its Max Planck Institute for Intelligent Systems and the
# Max Planck Institute for Biological Cybernetics. All rights reserved.
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
# on this computer program. You can only use this computer program if you have closed a license agreement
# with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and liable to prosecution.
# Contact: [email protected]
#
#
# If you use this code in a research publication please consider citing the following:
#
# Expressive Body Capture: 3D Hands, Face, and Body from a Single Image <https://arxiv.org/abs/1904.05866>
#
#
# Code Developed by:
# Nima Ghorbani <https://nghorbani.github.io/>
#
# 2020.12.12
import numpy as np
import torch
from human_body_prior.models.model_components import BatchFlatten
from human_body_prior.tools.rotation_tools import matrot2aa
from torch import nn
from torch.nn import functional as F
class ContinuousRotReprDecoder(nn.Module):
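    """Maps a 6D continuous rotation representation (two 3-vectors per joint)
    to 3x3 rotation matrices via Gram-Schmidt orthogonalisation."""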
def __init__(self):
super(ContinuousRotReprDecoder, self).__init__()
def forward(self, module_input):
bs, d = module_input.size()
nj = d // 6 # num_joints inference
reshaped_input = module_input.view(bs*nj, 3, 2)
b1 = F.normalize(reshaped_input[:, :, 0], dim=1)
dot_prod = torch.sum(b1 * reshaped_input[:, :, 1], dim=1, keepdim=True)
b2 = F.normalize(reshaped_input[:, :, 1] - dot_prod * b1, dim=-1)
b3 = torch.cross(b1, b2, dim=1)
return torch.stack([b1, b2, b3], dim=-1)
class NormalDistDecoder(nn.Module):
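    """Maps input features to a diagonal Normal distribution whose mean and
    (softplus-activated) standard deviation are learned linear projections."""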
def __init__(self, num_feat_in, latentD):
super(NormalDistDecoder, self).__init__()
self.mu = nn.Linear(num_feat_in, latentD)
self.logvar = nn.Linear(num_feat_in, latentD)
def forward(self, Xout):
return torch.distributions.normal.Normal(self.mu(Xout), F.softplus(self.logvar(Xout)))
class VPoser(nn.Module):
def __init__(self, model_ps):
super(VPoser, self).__init__()
num_neurons, self.latentD = model_ps.model_params.num_neurons, model_ps.model_params.latentD
self.num_joints = 21
n_features = self.num_joints * 3
self.encoder_net = nn.Sequential(
BatchFlatten(),
nn.BatchNorm1d(n_features),
nn.Linear(n_features, num_neurons),
nn.LeakyReLU(),
nn.BatchNorm1d(num_neurons),
nn.Dropout(0.1),
nn.Linear(num_neurons, num_neurons),
nn.Linear(num_neurons, num_neurons),
NormalDistDecoder(num_neurons, self.latentD)
)
self.decoder_net = nn.Sequential(
nn.Linear(self.latentD, num_neurons),
nn.LeakyReLU(),
nn.Dropout(0.1),
nn.Linear(num_neurons, num_neurons),
nn.LeakyReLU(),
nn.Linear(num_neurons, self.num_joints * 6),
ContinuousRotReprDecoder(),
)
def encode(self, pose_body):
'''
:param Pin: Nx(numjoints*3)
:param rep_type: 'matrot'/'aa' for matrix rotations or axis-angle
:return:
'''
return self.encoder_net(pose_body)
def decode(self, Zin):
bs = Zin.shape[0]
prec = self.decoder_net(Zin)
return {
'pose_body': matrot2aa(prec.view(-1, 3, 3)).view(bs, -1, 3),
'pose_body_matrot': prec.view(bs, -1, 9)
}
def forward(self, pose_body):
'''
:param Pin: aa: Nx1xnum_jointsx3 / matrot: Nx1xnum_jointsx9
:param input_type: matrot / aa for matrix rotations or axis angles
:param output_type: matrot / aa
:return:
'''
q_z = self.encode(pose_body)
q_z_sample = q_z.rsample()
decode_results = self.decode(q_z_sample)
decode_results.update({'poZ_body_mean': q_z.mean, 'poZ_body_std': q_z.scale, 'q_z': q_z})
return decode_results
def sample_poses(self, num_poses, seed=None):
np.random.seed(seed)
some_weight = [a for a in self.parameters()][0]
dtype = some_weight.dtype
device = some_weight.device
self.eval()
with torch.no_grad():
Zgen = torch.tensor(np.random.normal(0., 1., size=(num_poses, self.latentD)), dtype=dtype, device=device)
return self.decode(Zgen)
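# Illustrative usage sketch (assumes `model_ps` is a configuration object exposing
# model_params.num_neurons and model_params.latentD, as used in __init__ above):
#   vposer = VPoser(model_ps)
#   out = vposer.sample_poses(num_poses=10)
#   out['pose_body']         # (10, 21, 3) axis-angle body pose
#   out['pose_body_matrot']  # (10, 21, 9) flattened rotation matrices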
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for user-visible error exceptions to raise in the CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import exceptions
class ComputeError(exceptions.Error):
"""Exceptions for compute common errors."""
class ArgumentError(ComputeError):
"""Command argument error."""
class AbortedError(ComputeError):
"""Operation aborted exception."""
class UpdatePropertyError(ComputeError):
"""Require at least on property to be modified exception."""
class ValidationError(ComputeError):
"""YAML data does not match the schema."""
class DuplicateError(ComputeError):
"""Duplicate param error."""
class FailedPromptError(ComputeError):
"""Prompt failure."""
class InvalidResourceError(ComputeError):
"""Invalid resource URI."""
|
import socket
import os
from PIL import Image
from assets.font import numbers_15x28
if socket.gethostname() == "rpiv2":
import cili9486
# width, height, SPI, SPEED, CS, RST, RS
lcd = cili9486.ili(320, 480, 0, 3200000, 8, 25, 24)
else:
import cili9325
import RPi.GPIO # pylint: disable=I0011,F0401
RPi.GPIO.setmode(RPi.GPIO.BCM)
RPi.GPIO.setup(6, RPi.GPIO.OUT)
RPi.GPIO.output(6, 1)
lcd = cili9325.ili(320, 240, 18, 27, 17, 25, 22, 23, 24, 5, 12, 16, 20, 21)
lcd.rotation = 0
lcd.init()
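# Bitmap digit fonts: judging by the usage below, get(n) returns an image for
# digit n and get_transparency() the colour to treat as transparent.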
FONTS = {
'15x28': numbers_15x28.Numbers(),
}
#print(lcd.transparency_color );
# image_file = Image.open("assets/japan_temple_240x320.jpg")
# lcd.draw_image(0, 0, image_file)
lcd.draw_image(0, 0, "assets/japan_temple_240x320.jpg")
lcd.transparency_color = (255, 255, 255)
# lcd.transparency_color = ((1, 1, 1), (9, 9, 9))
lcd.draw_image(10, 10, "assets/numbers.jpg")
# lcd.transparency_color = (0, 0, 0)
# lcd.draw_image(10, 0, "assets/dsp2017_101_64.png")
numbers_image = Image.open("assets/dsp2017_101_64.png")
lcd.transparency_color = (0, 0, 0)
# print(lcd.transparency_color)
lcd.draw_image(10, 100, numbers_image)
# lcd.transparency_color = ((4,4,4),(1, 1, 1), (9, 9, 9))
#lcd.transparency_color = None
lcd.transparency_color = FONTS['15x28'].get_transparency()
lcd.draw_image(
10,
10,
FONTS['15x28'].get(0)
)
lcd.draw_image(
30,
10,
FONTS['15x28'].get(1)
)
lcd.draw_image(
50,
10,
FONTS['15x28'].get(9)
)
print("end") |
from fabric.api import local
from fabric.tasks import Task
from vagrant import collectstatic, css_compile, killall
from fabric.colors import red, green, yellow
from agency_vars import with_vars, APP_INFO
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
# suppress tasks
__all__ = []
def get_hash(env):
# print red("CHECKING OUT BRANCH: {}".format(APP_INFO[env]["branch_name"]))
# local('git checkout {}'.format(APP_INFO[env]["branch_name"]))
return local('git rev-parse HEAD', capture=True).strip()[:20]
def get_heroku_asset_version(env):
git_hash = local('heroku config:get ASSET_VERSION -a {}'.format(APP_INFO[env]["heroku_app_name"]), capture=True)
print "got hash: ", yellow('{}'.format(git_hash))
return git_hash
def set_heroku_asset_version(env, git_hash):
local('heroku config:set ASSET_VERSION={} -a {}'.format(git_hash, APP_INFO[env]["heroku_app_name"]))
def set_heroku_maintenance_page(env, url):
local('heroku config:set MAINTENANCE_PAGE_URL={} -a {}'.format(url, APP_INFO[env]["heroku_app_name"]))
def set_heroku_error_page(env, url):
local('heroku config:set ERROR_PAGE_URL={} -a {}'.format(url, APP_INFO[env]["heroku_app_name"]))
def current_asset_version(env, ):
return get_hash(env)
class CustomTask(Task):
def __init__(self, func, env, *args, **kwargs):
super(CustomTask, self).__init__(*args, **kwargs)
self.func = func
self.env = env
self.name = func.__name__
self.__doc__ = func.__doc__
def run(self, *args, **kwargs):
if 'env' not in kwargs:
kwargs['env'] = self.env
return self.func(*args, **kwargs)
def deploy(env=None, quick=False):
"""Deploy static and source to heroku environment"""
local('sudo ntpdate -u 0.pool.ntp.org')
local('pip install -r requirements/production.txt')
version = get_heroku_asset_version(env) if quick else current_asset_version(env=env)
    current_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
if current_branch != APP_INFO[env]['branch_name']:
print red("NOT ON DESIRED DEPLOY BRANCH: {}".format(APP_INFO[env]["branch_name"]))
compile_env_css(env=env, asset_version=version)
deploy_static_media(env=env, asset_version=version, quick=quick)
deploy_maintenance_pages(env=env, asset_version=version, quick=quick)
deploy_source(env=env, asset_version=version, quick=quick)
clear_cache(env=env, asset_version=version, quick=quick)
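# Note: deploy() and the helpers below are meant to be wrapped as CustomTask
# instances elsewhere so that `env` is bound per environment (see CustomTask above).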
@with_vars
def deploy_maintenance_pages(env=None, asset_version='', quick=False, agency_vars={}):
url = "{}error.html".format(agency_vars['STATIC_URL'])
print(agency_vars)
set_heroku_error_page(env, url)
set_heroku_maintenance_page(env, url)
@with_vars
def deploy_source(env=None, asset_version='', quick=False, agency_vars={}):
"""Deploy source to heroku environment"""
print green('Deploying source to Heroku')
local('git push {} {}:master'.format(APP_INFO[env]["heroku_remote_name"], APP_INFO[env]["branch_name"]))
sync_prod_db(env=env)
if not quick:
set_heroku_asset_version(env, asset_version)
@with_vars
def clear_cache(env=None, asset_version='', quick=False, agency_vars={}):
local('heroku run ./manage.py clear_cache -a {}'.format(APP_INFO[env]["heroku_app_name"]))
@with_vars
def deploy_static_media(env=None, asset_version='', quick=False, agency_vars={}):
"""Deploy static (runs collectstatic within given environment)"""
print green('Deploying static media {}'.format('__quick__' if quick else ''))
collectstatic(no_input=True, skip_admin=quick)
@with_vars
def deploy_user_media(env=None, agency_vars={} ):
"""Deploy user media to media location on s3"""
print green('Deploying user media')
local('./manage.py sync_media_s3 --prefix=uploads')
@with_vars
def sync_prod_db(env=None, reset_db=False, agency_vars={}):
"""Run syncdb and migrate within given environment"""
print green('sync/migrate DB')
if reset_db:
# uncomment below and replace DATABSE_URL with the prod database url
# note that this is destructive of the PROD DB
#local('heroku pg:reset DATABASE_URL') #add "--confirm haus" to remove required input
pass
local('heroku run ./manage.py syncdb -a {}'.format(APP_INFO[env]["heroku_app_name"]))
local('heroku run ./manage.py migrate -a {}'.format(APP_INFO[env]["heroku_app_name"]))
@with_vars
def compile_env_css(env=None, asset_version='', agency_vars={}):
log.warning(red('Calling killall, all process will be killed!'))
killall()
css_compile()
log.warning(red('Note killall was called so vagrant server/compass will be down')) |