"""
qstode.searcher
~~~~~~~~~~~~~~~
Whoosh search engine support.
:copyright: (c) 2013 by Daniel Kertesz
:license: BSD, see LICENSE for more details.
"""
import os
import json
import redis
from whoosh.fields import ID, TEXT, KEYWORD, Schema
from whoosh.analysis import RegexTokenizer, LowercaseFilter, CharsetFilter
from whoosh.support.charset import accent_map
from whoosh.index import create_in, open_dir, exists_in
from whoosh.writing import AsyncWriter
from whoosh.qparser import MultifieldParser
from whoosh.sorting import Facets
# Constants used in the Redis message queue
OP_INDEX, OP_UPDATE, OP_DELETE = list(range(3))
# Queue names for Redis
QUEUE_INDEX = "index_in"
QUEUE_WORK = "index_work"
def generate_schema():
"""Generates the search engine schema"""
text_analyzer = RegexTokenizer() | LowercaseFilter() | CharsetFilter(accent_map)
schema = Schema(
id=ID(stored=True, unique=True),
title=TEXT(stored=False, analyzer=text_analyzer),
tags=KEYWORD(stored=False, lowercase=True, commas=True),
notes=TEXT(stored=False, analyzer=text_analyzer),
)
return schema
def create_document(bookmark):
"""Creates a Document (a dict) for the search engine"""
return {
"id": str(bookmark.id),
"title": bookmark.title or "",
"notes": bookmark.notes or "",
"tags": ", ".join([tag.name for tag in bookmark.tags]),
}
def redis_connect(config):
"""Connects to a Redis database as specified by the dictionary `config`"""
return redis.Redis(
host=config.get("REDIS_HOST", "localhost"),
port=config.get("REDIS_PORT", 6379),
db=config.get("REDIS_DB", 0),
password=config.get("REDIS_PASSWORD"),
)
class WhooshSearcher(object):
"""Interface to a Whoosh based Search Engine"""
# default search fields for user queries
search_fields = ("notes", "title", "tags")
def __init__(self, app=None, index_dir=None):
self.app = app
self.index_dir = index_dir
self._ix = None
self._redis = None
@property
def ix(self):
"""Lazy opening of the Whoosh index"""
if self._ix is None:
self._ix = self._open_index()
return self._ix
@property
def redis(self):
"""Lazy opening of the Redis connection"""
if self._redis is None:
self._redis = redis_connect(self.app.config)
return self._redis
def init_app(self, app):
"""Initialize module and checks if the index exists"""
self.app = app
if "WHOOSH_INDEX_PATH" not in self.app.config:
raise Exception("You must set the WHOOSH_INDEX_PATH option " "in the configuration")
self.index_dir = self.app.config["WHOOSH_INDEX_PATH"]
if not exists_in(self.index_dir):
self.setup_index()
def setup_index(self):
"""Create the index directory"""
if not os.path.exists(self.index_dir):
os.mkdir(self.index_dir)
schema = generate_schema()
self._ix = create_in(self.index_dir, schema)
def _open_index(self):
ix = open_dir(self.index_dir)
return ix
def get_async_writer(self):
"""Return an AsyncWriter; NOTE that we NEED thread support (i.e when
you're running in uwsgi"""
return AsyncWriter(self.ix)
def push_add_bookmark(self, bookmark):
"""Pushes a 'add bookmark' operation to the Redis queue"""
r = self.redis
payload = json.dumps((OP_INDEX, bookmark.id))
r.rpush(QUEUE_INDEX, payload)
def push_update_bookmark(self, bookmark):
"""Pushes a 'update bookmark' operation to the Redis queue"""
self.push_add_bookmark(bookmark)
def push_delete_bookmark(self, bookmark_id):
"""Pushes a 'delete bookmark' operation to the Redis queue"""
r = self.redis
payload = json.dumps((OP_DELETE, bookmark_id))
r.rpush(QUEUE_INDEX, payload)
def add_bookmark(self, bookmark, writer=None):
"""Index a bookmark, updating it if it's already indexed;
if you pass a `writer` object you are responsible for calling
`commit()` at the end of the operations.
If no `writer` is passed an AsyncWriter will be used.
"""
document = create_document(bookmark)
if writer is None:
writer = self.get_async_writer()
writer.update_document(**document)
writer.commit()
else:
writer.update_document(**document)
def update_bookmark(self, bookmark, writer=None):
"""Reindex a Bookmark"""
self.add_bookmark(bookmark, writer)
def delete_bookmark(self, bookmark_id, writer=None):
"""Delete a Bookmark from the index"""
_id = str(bookmark_id)
if writer is None:
writer = self.get_async_writer()
writer.delete_by_term("id", _id)
writer.commit()
else:
writer.delete_by_term("id", _id)
def search(self, query, page=1, page_len=10, fields=None):
"""Returns the results of a search engine query ordered by
Whoosh default ordering (?).
:returns: a list of bookmark id (int)
"""
if fields is None:
fields = tuple(self.search_fields)
results = None
with self.ix.searcher() as searcher:
parser = MultifieldParser(fields, self.ix.schema)
whoosh_query = parser.parse(query)
facets = Facets()
facets.add_field("tags", allow_overlap=True)
# this can raise a ValueError
search_results = searcher.search_page(
whoosh_query, page, pagelen=page_len, groupedby=facets
)
results = [int(result["id"]) for result in search_results]
return results or []
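# Hedged usage sketch (illustrative, not part of the original module): a worker
# loop that drains the Redis queue fed by push_add_bookmark()/push_delete_bookmark()
# above and applies each operation to the Whoosh index. `load_bookmark` is an
# assumed callable that returns the bookmark object for a given id.
def run_index_worker(searcher, load_bookmark):
    """Blocking loop consuming (operation, bookmark_id) payloads from Redis."""
    r = searcher.redis
    while True:
        # BLPOP blocks until a payload is available; it returns (queue, value)
        _, payload = r.blpop(QUEUE_INDEX)
        op, bookmark_id = json.loads(payload)
        if op in (OP_INDEX, OP_UPDATE):
            # add_bookmark() without an explicit writer uses an AsyncWriter and commits
            searcher.add_bookmark(load_bookmark(bookmark_id))
        elif op == OP_DELETE:
            searcher.delete_bookmark(bookmark_id)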
|
python
|
################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. Michel F. Sanner and TSRI 2016
##
################################################################################
########################################################################
#
# Date: 2015 Authors: Michel Sanner
#
# [email protected]
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: Michel Sanner and TSRI 2015
#
#########################################################################
#
# $Header: /mnt/raid/services/cvs/python/packages/share1.5/mglutil/util/io.py,v 1.1.4.1 2017/07/26 22:31:38 annao Exp $
#
# $Id: io.py,v 1.1.4.1 2017/07/26 22:31:38 annao Exp $
#
class Stream:
def __init__(self):
self.lines = []
def write(self, line):
self.lines.append(line)
# helper class to make stdout set of lines look like a file that ProDy can parse
class BufferAsFile:
def __init__(self, lines):
self.lines = lines
def readlines(self):
return self.lines
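# Hedged usage sketch (illustrative): capture writes with Stream, then hand the
# captured lines to code that expects a readable file-like object (e.g. a parser
# that calls readlines()). The sample lines below are only placeholders.
if __name__ == "__main__":
    stream = Stream()
    stream.write("REMARK placeholder line 1\n")
    stream.write("REMARK placeholder line 2\n")
    fake_file = BufferAsFile(stream.lines)
    for line in fake_file.readlines():
        print(line.rstrip())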
|
python
|
import math
from PyQt5 import QtCore
from PyQt5.QtCore import QUrl, QObject, pyqtSignal
from PyQt5.QtGui import QMouseEvent, QColor
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QWidget, QSlider, QGraphicsDropShadowEffect, QFrame
from cvstudio.util import VideoUtilities
class VideoPlayerWidgetSignals(QObject):
video_position_changed_signal = pyqtSignal(int, int)
video_duration_changed_signal = pyqtSignal(int)
class VideoPlayer(QWidget):
def __init__(self, parent=None):
super(VideoPlayer, self).__init__(parent)
self._source = None
self._total_duration = 0
self.widget_layout = QVBoxLayout()
self.media_player = QMediaPlayer(None, QMediaPlayer.VideoSurface)
self.video_player = QVideoWidget()
self.widget_layout.addWidget(self.video_player)
self.media_player.setVideoOutput(self.video_player)
# self.media_player.stateChanged.connect(self.mediaStateChanged)
self.media_player.positionChanged.connect(self.on_positionChanged)
self.signals = VideoPlayerWidgetSignals()
self.media_player.durationChanged.connect(self.on_durationChanged)
self.setLayout(self.widget_layout)
print(self.media_player.duration())
@property
def total_duration(self):
return self._total_duration
@total_duration.setter
def total_duration(self, val):
self._total_duration = val
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
self._total_duration = math.floor(VideoUtilities.duration(self.source))
def play(self):
if self._source:
self.media_player.setMedia(
QMediaContent(QUrl.fromLocalFile(self._source)))
self.media_player.play()
def resume(self):
if self.media_player.state() == QMediaPlayer.PlayingState:
self.media_player.pause()
else:
self.media_player.play()
def stop(self):
if self.media_player.state() == QMediaPlayer.PlayingState:
self.media_player.stop()
def go_to(self, second: int):
if self.media_player:
self.media_player.setPosition(second * 1000)
@QtCore.pyqtSlot('qint64')
def on_positionChanged(self, position):
self.signals.video_position_changed_signal.emit(math.floor(position / 1000), self.total_duration)
if self.media_player.state() == QMediaPlayer.StoppedState:
if 0 <= position <= self.total_duration * 1000:
self.media_player.play()
@QtCore.pyqtSlot('qint64')
def on_durationChanged(self, duration):
self.signals.video_duration_changed_signal.emit(math.floor(duration / 1000))
class VideoViewerContainer(QWidget):
def __init__(self, window: QDialog, parent=None):
super(VideoViewerContainer, self).__init__(parent)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.resize(1024, 580)
layout = QVBoxLayout(self)
layout.addWidget(window)
layout.setContentsMargins(0, 0, 6, 6)
self.shadow = QGraphicsDropShadowEffect(self)
self.shadow.setBlurRadius(50)
self.shadow.setColor(QColor(138, 145, 140))
self.shadow.setOffset(8)
window.setGraphicsEffect(self.shadow)
class VideoDialog(QDialog):
def __init__(self, video_path, parent=None):
super(VideoDialog, self).__init__(parent)
# self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.resize(400, 400)
position = self.cursor().pos()
position.setX(position.x())
position.setY(position.y())
self.move(position)
# self.setWindowOpacity(0.9)
self.setLayout(QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
self.widget = QFrame()
self.widget.setStyleSheet('''
QFrame{
border-style: outset;
border-width: 1px;
/*border-radius: 10px;*/
border-color: #B94129;
}
''')
# self.widget.setFrameStyle(QFrame.Box)
self.widget.setLayout(QVBoxLayout())
self.widget.layout().setContentsMargins(10, 10, 10, 10)
self.video_player = VideoPlayer()
self.video_player.source = video_path
self.video_player.play()
self.video_player.signals.video_position_changed_signal.connect(self.video_position_changed)
duration = self.video_player.total_duration
self.video_duration_slider = QSlider(orientation=QtCore.Qt.Horizontal)
self.video_duration_slider.setRange(0, duration)
self.video_duration_slider.setTickInterval(5)
self.video_duration_slider.sliderMoved.connect(self.slider_changed_handler)
# self.video_duration_slider.setTickPosition(QSlider.TicksBelow)
self.setMouseTracking(True)
self.setWindowFlags(QtCore.Qt.Popup | QtCore.Qt.WindowStaysOnTopHint
| QtCore.Qt.FramelessWindowHint
| QtCore.Qt.X11BypassWindowManagerHint)
# self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.widget.layout().addWidget(self.video_player)
self.widget.layout().addWidget(self.video_duration_slider)
self.layout().addWidget(self.widget)
def setMouseTracking(self, flag):
def set_mouse_tracking(parent):
for child in parent.findChildren(QtCore.QObject):
try:
child.setMouseTracking(flag)
except:
pass
set_mouse_tracking(child)
QWidget.setMouseTracking(self, flag)
set_mouse_tracking(self)
def slider_changed_handler(self, change):
self.video_player.go_to(change)
def video_position_changed(self, current, total):
self.video_duration_slider.setValue(current)
def mouseMoveEvent(self, event: QMouseEvent) -> None:
# print('mouseMoveEvent: x=%d, y=%d' % (event.x(), event.y()))
if not self.rect().contains(event.pos()):
self.close()
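# Hedged usage sketch (illustrative, not part of the original module): how the
# dialog might be launched standalone. The video path is a placeholder and the
# cvstudio/PyQt5 dependencies imported above must be installed for this to run.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    dialog = VideoDialog("/path/to/video.mp4")  # placeholder path
    dialog.show()
    sys.exit(app.exec_())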
|
python
|
# -*- coding: utf-8 -*-
import abc
from inspect import isfunction, signature
from types import FunctionType
from watson import di
from watson.common.contextmanagers import suppress
from watson.common import imports
from watson.di.types import FUNCTION_TYPE
class Base(di.ContainerAware, metaclass=abc.ABCMeta):
"""The base processor that all other processors should extend.
When a processor is called from the container the following parameters are
sent through with the event.
- definition: The dict definition of the dependency
- dependency: The name of the dependency
Depending on the event, a different target will also be sent with the event.
- watson.di.container.PRE_EVENT: The dict definition of the dependency
- watson.di.container.POST_EVENT: The initialized dependency
"""
@abc.abstractmethod
def __call__(self, event):
raise NotImplementedError(
'The processor <{}> must implement __call__'.format(imports.get_qualified_name(self))) # pragma: no cover
def get_args_kwargs(self, obj):
args, kwargs = [], {}
if isinstance(obj, dict):
for key, val in obj.items():
kwargs[key] = get_param_from_container(val, self.container)
elif isinstance(obj, list):
for arg in obj:
args.append(get_param_from_container(arg, self.container))
return args, kwargs
class ConstructorInjection(Base):
"""Responsible for initializing the dependency.
Responsible for initializing the dependency and injecting any required
values into the constructor.
Args:
event (watson.events.types.Event): The event dispatched from the container.
Returns:
mixed: The dependency
"""
def instantiate(self, definition):
item = definition['item']
if hasattr(item, '__ioc_definition__'):
definition.update(item.__ioc_definition__)
args, kwargs = [], {}
is_lambda = definition.get('call_type', None) == FUNCTION_TYPE
sig = signature(item)
if 'container' in sig.parameters:
kwargs['container'] = self.container
if 'init' in definition:
init = definition['init']
updated_args, updated_kwargs = self.get_args_kwargs(init)
args.extend(updated_args)
kwargs.update(updated_kwargs)
if isfunction(init):
sig = signature(init)
if 'container' in sig.parameters:
kwargs['container'] = self.container
init = init(*args, **kwargs)
definition['init'] = init
if not is_lambda:
args, kwargs = self.get_args_kwargs(init)
item = item(*args, **kwargs)
if is_lambda and isinstance(item, str):
# Special case for items that might be retrieved via lambda expressions
with suppress(Exception):
definition['item'] = self.container.load_item_from_string(item)
item, args, kwargs = self.instantiate(definition)
return item, args, kwargs
def __call__(self, event):
definition = event.params['definition']
item, args, kwargs = self.instantiate(definition)
return item
class SetterInjection(Base):
"""Responsible for injecting required values into setter methods.
Args:
event (watson.events.types.Event): The event dispatched from the container.
Returns:
mixed: The dependency
"""
def __call__(self, event):
item = event.target
definition = event.params['definition']
if 'setter' in definition:
for setter, args in definition['setter'].items():
method = getattr(item, setter)
if isinstance(args, dict):
kwargs = {arg: get_param_from_container(
value,
self.container) for arg,
value in args.items()}
method(**kwargs)
elif isinstance(args, list):
args = [get_param_from_container(arg, self.container)
for arg in args]
method(*args)
else:
method(get_param_from_container(args, self.container))
return item
class AttributeInjection(Base):
"""Responsible for injecting required values into attributes.
Args:
event (watson.events.types.Event): The event dispatched from the
container.
Returns:
mixed: The dependency
"""
def __call__(self, event):
item = event.target
if 'property' in event.params['definition']:
for prop, value in event.params['definition']['property'].items():
setattr(
item,
prop,
get_param_from_container(
value,
self.container))
return item
class ContainerAware(Base):
"""Injects the container into a dependency.
Responsible for injecting the container in any class that extends
watson.di.ContainerAware. The container is then accessible via object.container
Args:
event (watson.events.types.Event): The event dispatched from the container.
Returns:
mixed: The dependency
"""
def __call__(self, event):
item = event.target
if isinstance(item, di.ContainerAware):
item.container = self.container
return item
def get_param_from_container(param, container):
"""Internal function used by the container.
Retrieve a parameter from the container, and determine whether or not that
parameter is an existing dependency.
Returns:
mixed: The dependency (if param name is the same as a dependency), the
param, or the value of the param.
"""
if param in container.params:
param = container.params[param]
if param in container:
param = container.get(param)
elif param in container:
param = container.get(param)
else:
if isinstance(param, FunctionType):
param = param(container)
return param
|
python
|
from __future__ import print_function
import json as json_lib
from threading import Lock
import requests.adapters
import py42.settings as settings
from py42._internal.compat import str
from py42._internal.compat import urljoin
from py42._internal.compat import urlparse
from py42.exceptions import raise_py42_error
from py42.response import Py42Response
from py42.settings import debug
from py42.util import format_dict
def _print_request(method, url, params=None, data=None):
debug.logger.info(u"{}{}".format(str(method).ljust(8), url))
if params:
debug.logger.debug(format_dict(params, u" params"))
if data:
debug.logger.debug(format_dict(data, u" data"))
class Py42Session(object):
def __init__(self, session, host_address, auth_handler=None):
self._initialized = False
self._needs_auth_renewal_check = False
self._auth_lock = Lock()
self._session = session
adapter = requests.adapters.HTTPAdapter(pool_connections=20, pool_maxsize=20)
if not host_address.startswith(u"http://") and not host_address.startswith(
u"https://"
):
host_address = u"https://{}".format(host_address)
self._host_address = host_address
self._auth_handler = auth_handler
self._session.proxies = settings.proxies
self._session.verify = settings.verify_ssl_certs
self._session.mount(u"https://", adapter)
self._session.mount(u"http://", adapter)
parsed_host = urlparse(self._host_address)
host = parsed_host.netloc
self._session.headers = {
u"Accept": u"application/json",
u"Content-Type": u"application/json",
u"Host": host,
u"User-Agent": settings.get_user_agent_string(),
u"Accept-Encoding": u"gzip, deflate",
u"Connection": u"keep-alive",
}
@property
def host_address(self):
return self._host_address
@property
def headers(self):
return self._session.headers
@property
def cookies(self):
return self._session.cookies
@property
def proxies(self):
return self._session.proxies
def get(self, url, **kwargs):
return self.request(u"GET", url, **kwargs)
def options(self, url, **kwargs):
return self.request(u"OPTIONS", url, **kwargs)
def head(self, url, **kwargs):
return self.request(u"HEAD", url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
return self.request(u"POST", url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
return self.request(u"PUT", url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
return self.request(u"PATCH", url, data=data, **kwargs)
def delete(self, url, **kwargs):
return self.request(u"DELETE", url, **kwargs)
def request(self, method, url, **kwargs):
try:
url = urljoin(self._host_address, url)
json = kwargs.get(u"json")
if json is not None:
kwargs[u"data"] = json_lib.dumps(_filter_out_none(json))
if u"json" in kwargs:
del kwargs[u"json"]
self._renew_authentication(use_cache=True)
tries = 0
max_tries = 2
while tries < max_tries:
response, unauthorized = self._try_make_request(method, url, **kwargs)
tries += 1
if unauthorized and tries < max_tries:
self._renew_authentication()
continue
if response.status_code >= 400:
response.raise_for_status()
if not kwargs.get(u"stream"):
response.encoding = (
u"utf-8" # setting this manually speeds up read times
)
return Py42Response(response)
except requests.HTTPError as err:
raise_py42_error(err)
def _try_make_request(
self,
method,
url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=60,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None,
):
_print_request(method, url, params=params, data=data)
response = self._session.request(
method,
url,
params=params,
data=data,
headers=headers,
cookies=cookies,
files=files,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
proxies=proxies,
hooks=hooks,
stream=stream,
verify=verify,
cert=cert,
)
unauthorized = (
self._auth_handler
and self._auth_handler.response_indicates_unauthorized(response)
)
return response, unauthorized
def _renew_authentication(self, use_cache=False):
if self._auth_handler:
# if multiple threads try to authenticate at once, only the first one actually does.
# the rest will just wait for that authentication to complete.
self._needs_auth_renewal_check = True
with self._auth_lock:
# only get new credentials if this is the first time or we want fresh ones
should_renew = (
not self._initialized or not use_cache
) and self._needs_auth_renewal_check
if should_renew:
self._auth_handler.renew_authentication(self, use_cache=use_cache)
self._needs_auth_renewal_check = False
# if there's no auth handler or we handled auth without errors, initialization is done.
self._initialized = True
def _filter_out_none(_dict):
return {key: _dict[key] for key in _dict if _dict[key] is not None}
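# Small illustrative check (not part of py42): _filter_out_none() drops keys
# whose values are None before the JSON body is serialized in request() above.
if __name__ == "__main__":
    body = {u"name": u"device-1", u"notes": None, u"active": True}
    assert _filter_out_none(body) == {u"name": u"device-1", u"active": True}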
|
python
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tasks to set the parameters of arbitrary waveform generators.
"""
import logging
import numbers
import numpy as np
from atom.api import (Enum, Str, Value)
from exopy.tasks.api import (InstrumentTask, validators)
#signal analyzer driver
import exopy_qcircuits.instruments.drivers.visa.rohde_and_schwarz_psa as sa
SA_SWEEPING = 0x0
SA_REAL_TIME = 0x1
class TuneIQMixerTask(InstrumentTask):
""" Task to tune an IQ mixer in SSB
    Implicit use of a Rohde & Schwarz spectrum analyzer (see perform())
Tunes channels I and Q DC offset, relative delay and voltage
to suppress LO leakage and unwanted sideband
TODO: handle task with two instruments: AWG AND Spectrum analyzer
TODO: implement realtime sweep for better SNR
"""
# Get user inputs
channelI = Enum('Ch1', 'Ch2', 'Ch3', 'Ch4').tag(pref=True)
channelQ = Enum('Ch1', 'Ch2', 'Ch3', 'Ch4').tag(pref=True)
# LO frequency
freq = Str('0.0').tag(pref=True,
feval=validators.SkipLoop(types=numbers.Real))
# modulation frequency
det = Str('0.0').tag(pref=True,
feval=validators.SkipLoop(types=numbers.Real))
    # LO power (dBm)
power = Str('0.0').tag(pref=True,
feval=validators.SkipLoop(types=numbers.Real))
# Desired sideband, e.g. if Lower, suppress freq and freq+det
SB = Enum('Lower', 'Upper').tag(pref=True)
my_sa = Value() # signal analyzer
chI = Value()
chQ = Value()
freq_Hz = Value()
det_Hz = Value()
SB_sgn = Value()
LO_pow = Value()
def check(self, *args, **kwargs):
''' Default checks and check different AWG channels
'''
test, traceback = super(TuneIQMixerTask, self).check(*args, **kwargs)
if not test:
return test, traceback
if self.channelI == self.channelQ:
test = False
msg = 'I and Q channels need to be different !'
traceback[self.get_error_path()] = msg
return test, traceback
def perform(self):
"""Default interface behavior.
"""
        # open the Rohde & Schwarz signal analyzer
visa_address = 'TCPIP0::192.168.0.52::inst0::INSTR'
connection_infos = {'resource_name': visa_address}
self.my_sa = sa.RohdeAndSchwarzPSA(connection_infos)#, mode=SA_SWEEPING)
# AWG channels
awg = self.driver
awg.run_mode = 'CONT'
awg.output_mode = 'FIX'
self.chI = awg.get_channel(int(self.channelI[-1]))
self.chQ = awg.get_channel(int(self.channelQ[-1]))
# convert user inputs into adequate units
self.LO_pow = self.format_and_eval_string(self.power)
self.freq_Hz = self.format_and_eval_string(self.freq)*1e9
self.det_Hz = self.format_and_eval_string(self.det)*1e6 #modulation freq
self.SB_sgn = 1 if self.SB == 'Lower' else -1
# setting the modulation frequency for each channel
self.chI.set_frequency(self.det_Hz)
self.chQ.set_frequency(self.det_Hz)
# Initialize AWG params
# set parameters to minima or centers everywhere
# initialisation
self.chI_vpp(0.15)
self.chQ_vpp(0.15)
self.chI_offset(0)
self.chQ_offset(0)
self.chQ_delay(0)
# perform optimization twice
self.tune_ssb('lo')
self.tune_ssb('sb')
pos_lo, cost = self.tune_ssb('lo')
pos_sb, cost = self.tune_ssb('sb')
# get power for optimal parameters at sig, leakage and sideband
# get_single_freq(self,freq,reflevel,rbw,vbw,avrg_num)
sig = self.my_sa.get_single_freq(self.freq_Hz-self.SB_sgn*self.det_Hz,
self.LO_pow,int(1e3),int(1e3),10)
lo = self.my_sa.get_single_freq(self.freq_Hz,self.LO_pow,1e3,1e3,10)
sb = self.my_sa.get_single_freq(self.freq_Hz+self.SB_sgn*self.det_Hz,
self.LO_pow,int(1e3),int(1e3),10)
# close signal analyzer
self.my_sa._close()
# log values
log = logging.getLogger(__name__)
msg1 = 'Tuned IQ mixer at LO = %s GHz, IF = %s MHz, \
Signal: %s dBm, LO: %s dBm, SB: %s dBm' % \
(1e-9*self.freq_Hz, 1e-6*self.det_Hz, sig, lo, sb)
log.info(msg1)
msg2 = 'chI offset: %s V, chQ offset: %s V, chQvpp: %s V, \
chQphase: %s °' % \
(pos_lo[0], pos_lo[1], pos_sb[0], pos_sb[1])
log.info(msg2)
# optimization procedure
def tune_ssb(self, mode):
# suppress lo leakage params
if mode == 'lo':
param1 = self.chI_offset
param2 = self.chQ_offset
f = self.freq_Hz
minvals = np.array([-1,-1])
maxvals = np.array([1,1])
precision = np.array([0.001,0.001])
pos0 = np.array([self.get_chI_offset(), self.get_chQ_offset()])
# suppress other sideband params
elif mode == 'sb':
param1 = self.chQ_vpp
param2 = self.chQ_delay
f = self.freq_Hz + self.SB_sgn*self.det_Hz
minvals = np.array([0.05,0])
maxvals = np.array([0.15,360])
precision = np.array([0.01,0.1])
pos0 = np.array([self.get_chQ_vpp(), self.get_chQ_delay()])
else:
            msg = '''mode has wrong value, should be 'lo' or 'sb',
                     received %s''' % mode
raise ValueError(msg)
# 4 directions in parameter search space
sens = [np.array([1, 0]), np.array([0, 1]),
np.array([-1, 0]), np.array([0, -1])]
# initial cost (cost = power of sa at f)
cost0 = self.cost(param1, param2, pos0[0], pos0[1], f)
### Qcircuits STARTS HERE ###
dec = 0.1
s = 0
c = 0
counter = 0
poslist = [pos0]
precision_reached = False
# stop search when step_size < AWG resolution
while dec >= 0.0001:
step_sizes = dec*(maxvals-minvals)
#check that we aren't lower than instrument precision
if ((step_sizes[0] - precision[0]) < 0) and \
((step_sizes[1] - precision[1]) < 0):
precision_reached = True
step_sizes = precision
elif (step_sizes[0] - precision[0]) < 0:
step_sizes[0] = precision[0]
elif (step_sizes[1] - precision[1]) < 0:
step_sizes[1] = precision[1]
            # break when the max eval count has been reached or
            # all 4 directions have been explored
while c < 4 and counter < 1000:
# probe cost at new pos: pos1
pos1 = pos0 + step_sizes*sens[s]
# check that we aren't out of bounds
if (not (minvals[0] <= pos1[0] <= maxvals[0])) and \
(not (minvals[1] <= pos1[1] <= maxvals[1])):
boundaries = np.array([minvals,
np.array([minvals[0],maxvals[1]]),
np.array([minvals[1],maxvals[0]]),
maxvals])
                    # find boundary closest to current value
pos1 = boundaries[np.argmin(list(map(abs,
boundaries-pos1)))]
elif not (minvals[0] <= pos1[0] <= maxvals[0]):
boundaries = np.array([minvals[0],maxvals[0]])
                    # find boundary closest to current value
pos1[0] = boundaries[np.argmin(list(map(abs,
boundaries-pos1[0])))]
elif not (minvals[1] <= pos1[1] <= maxvals[1]):
boundaries = np.array([minvals[1],maxvals[1]])
                    # find boundary closest to current value
pos1[1] = boundaries[np.argmin(list(map(abs,
boundaries-pos1[1])))]
# evaluate cost of new position
cost1 = self.cost(param1, param2, pos1[0], pos1[1], f)
counter += 1
# if lower cost, update pos
if cost1 < cost0:
cost0 = cost1
pos0 = pos1
c = 0
poslist.append(pos0)
else:
c += 1
s = np.mod(s+1, 4)
c = 0
# decrease dec if all explored directions give higher cost
dec /= 10
if precision_reached:
break
return pos0, cost0
# optimization cost function: get power in dBm at f from signal_analyzer
def cost(self, param1, param2, val1, val2, f):
param1(val1)
param2(val2)
return self.my_sa.get_single_freq(f,self.LO_pow,1e3,1e3,10)
# define AWG getter and setter functions to pass into cost function
def chI_offset(self, value):
self.chI.set_DC_offset(value)
def chQ_offset(self, value):
self.chQ.set_DC_offset(value)
def get_chI_offset(self):
return self.chI.DC_offset()
def get_chQ_offset(self):
return self.chQ.DC_offset()
def chI_vpp(self, value):
self.chI.set_Vpp(value)
def chQ_vpp(self, value):
self.chQ.set_Vpp(value)
def get_chI_vpp(self):
return self.chI.Vpp()
def get_chQ_vpp(self):
return self.chQ.Vpp()
def chQ_delay(self, value):
self.chQ.set_phase(value)
def get_chQ_delay(self):
return self.chQ.phase()
|
python
|
import sys
import SocketServer
import ssl
import struct
import socket
import comms
def parse_table():
ret = {}
ret["major"] = "bloodnok"
ret["harry"] = "seagoon"
return ret
class listener(SocketServer.BaseRequestHandler):
CMD_QUERY_ALL = 0
CMD_TRIGGER = 1
CMD_EXIT = 2
CMD_DISPLAY_SHOW = 0
CMD_DISPLAY_EXIT = 1
STATUS_OK = 0
STATUS_FAIL = 1
def handle(self):
global table
global display
size = comms.get_u32(self.request)
msg = comms.read_all(self.request, size)
code = struct.unpack('>I', msg[:4])[0]
if code == listener.CMD_QUERY_ALL:
self.request.send(struct.pack('>I', listener.STATUS_OK))
msg = b''
for k in table.keys():
msg += struct.pack('>I', len(k))
msg += k
self.request.send(struct.pack('>I', len(msg)))
self.request.send(msg)
return
if code == listener.CMD_TRIGGER:
key = msg[4:]
if key not in table:
self.request.send(struct.pack('>I', listener.STATUS_FAIL))
return
pw = table[key]
print "Displaying password '%s'" %pw
display.send(struct.pack('>II', listener.CMD_DISPLAY_SHOW, len(pw)))
display.send(pw)
status = comms.read_all(self.request, 4)
self.request.send(status)
return
        if code == listener.CMD_EXIT:
display.send(struct.pack('>I', listener.CMD_DISPLAY_EXIT))
sys.exit(0)
print "Received unknown command %d" % code
cert_file='server.crt'
key_file='server.key'
ca_file='root.crt'
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
display = ssl.wrap_socket(client_sock, key_file, cert_file, False,
ssl.CERT_REQUIRED, ssl.PROTOCOL_TLSv1_2,
ca_file)
display.connect(('127.0.0.1', 6655))
print "TLS connection established"
table = parse_table()
SocketServer.allow_reuse_address = True
server = SocketServer.TCPServer(('127.0.0.1', 6512), listener)
server.serve_forever()
|
python
|
from __future__ import absolute_import
import blosc
try:
import cPickle as pickle_
except ImportError:
import pickle as pickle_
import os
import collections
from . import report
from . import object as object_
from . import path
from os.path import join
from os.path import isdir
class PickleByName(object):
"""Makes un-pickle-able objects pick-able by setting its un-pickle-able
attributes as signature only attributes."""
def __init__(self):
self._signature_only_attrs = set()
def set_signature_only_attr(self, attr_name):
self._signature_only_attrs.add(attr_name)
def __getstate__(self):
d = self.__dict__.copy()
for attr_name in self._signature_only_attrs:
o = getattr(self, attr_name)
d[attr_name + '_fullname'] = object_.fullname(o)
d[attr_name + '_init_dict'] = o.init_dict()
del d[attr_name]
return d
def __setstate__(self, d):
import importlib
for attr_name in d['_signature_only_attrs']:
fn = d[attr_name + '_fullname']
k = fn.rfind(".")
module_name, class_name = fn[:k], fn[k+1:]
init_dict = d[attr_name + '_init_dict']
mod = importlib.import_module(module_name)
class_ = getattr(mod, class_name)
o = class_(**init_dict)
d[attr_name] = o
del d[attr_name + '_fullname']
del d[attr_name + '_init_dict']
self.__dict__.update(d)
class SlotPickleMixin(object):
"""Top-class that allows mixing of classes with and without slots.
Takes care that instances can still be pickled with the lowest
protocol. Moreover, provides a generic `__dir__` method that
lists all slots.
"""
# We want to allow weak references to the objects
__slots__ = ['__weakref__']
def _get_all_slots(self):
"""Returns all slots as set"""
all_slots = (getattr(cls, '__slots__', [])
for cls in self.__class__.__mro__)
return set(slot for slots in all_slots for slot in slots)
def __getstate__(self):
if hasattr(self, '__dict__'):
# We don't require that all sub-classes also define slots,
# so they may provide a dictionary
statedict = self.__dict__.copy()
else:
statedict = {}
# Get all slots of potential parent classes
for slot in self._get_all_slots():
try:
value = getattr(self, slot)
statedict[slot] = value
except AttributeError:
pass
# Pop slots that cannot or should not be pickled
statedict.pop('__dict__', None)
statedict.pop('__weakref__', None)
return statedict
def __setstate__(self, state):
for key, value in state.items():
setattr(self, key, value)
def __dir__(self):
result = dir(self.__class__)
result.extend(self._get_all_slots())
if hasattr(self, '__dict__'):
result.extend(self.__dict__.keys())
return result
def pickle(obj, filepath):
arr = pickle_.dumps(obj, -1)
with open(filepath, 'wb') as f:
s = 0
while s < len(arr):
e = min(s + blosc.MAX_BUFFERSIZE, len(arr))
carr = blosc.compress(arr[s:e], typesize=8)
f.write(carr)
s = e
def unpickle(filepath):
arr = []
with open(filepath, 'rb') as f:
while True:
carr = f.read(blosc.MAX_BUFFERSIZE)
if len(carr) == 0:
break
arr.append(blosc.decompress(carr))
return pickle_.loads(b"".join(arr))
def _save_cache(folder, lastmodif_hash):
fpath = join(folder, '.folder_hash')
with open(fpath, 'w') as f:
f.write(lastmodif_hash)
def _get_file_list(folder):
file_list = []
for (dir_, _, files) in os.walk(folder):
if dir_ == folder:
continue
for f in files:
fpath = join(dir_, f)
if fpath.endswith('pkl') and os.path.basename(fpath) != 'all.pkl':
file_list.append(fpath)
return file_list
def _merge(file_list):
pbar = report.ProgressBar(len(file_list))
out = dict()
for (i, fpath) in enumerate(file_list):
d = unpickle(fpath)
if isinstance(d, collections.Iterable):
out.update(d)
else:
key = os.path.basename(fpath).split('.')[0]
out[int(key)] = d
pbar.update(i+1)
pbar.finish()
return out
def pickle_merge(folder):
"""Merges pickle files from the specified folder and save it to `all.pkl`.
"""
file_list = _get_file_list(folder)
if len(file_list) == 0:
print('There is nothing to merge because no file'+
' has been found in %s.' % folder)
return
with report.BeginEnd('Computing hashes'):
ha = path.folder_hash(folder, ['all.pkl', '.folder_hash'])
subfolders = [d for d in os.listdir(folder) if isdir(join(folder, d))]
with path.temp_folder() as tf:
for sf in subfolders:
path.make_sure_path_exists(join(tf, sf))
path.cp(join(folder, sf), join(tf, sf))
file_list = _get_file_list(tf)
with report.BeginEnd('Merging pickles'):
out = _merge(file_list)
with report.BeginEnd('Storing pickles'):
pickle(out, join(folder, 'all.pkl'))
_save_cache(folder, ha)
return out
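# Hedged roundtrip sketch (illustrative): pickle() writes blosc-compressed
# chunks and unpickle() reads them back; the temporary file path is created
# only for this example and removed afterwards.
if __name__ == '__main__':
    import tempfile
    data = {'a': list(range(10)), 'b': 'hello'}
    with tempfile.NamedTemporaryFile(suffix='.pkl', delete=False) as tmp:
        tmp_path = tmp.name
    pickle(data, tmp_path)
    assert unpickle(tmp_path) == data
    os.remove(tmp_path)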
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*
"""
The MIT License (MIT)
Copyright (c) 2015 Christophe Aubert
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = "Christophe Aubert"
__version__ = "1.0"
import dataBase.sqlite3
from dataBase.sqlite3 import ConnectDB
class CreateDB(ConnectDB.ConnectDB):
"""
    The CreateDB class creates the database with all of its tables.
"""
def __init__(self,path,name):
"""
Init
@param path:
@param name:
"""
ConnectDB.ConnectDB.__init__(self, path, name)
self.connect()
def create(self):
"""
        Creates the `probe` and `value` tables in the database.
"""
try:
self.cursor.execute('''CREATE TABLE `probe` (
`p_id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`p_name` REAL NOT NULL UNIQUE,
`p_longitude` REAL NOT NULL UNIQUE,
`p_latitude` REAL NOT NULL UNIQUE
)''')
except dataBase.sqlite3.OperationalError:
print"Table `probe` already exists."
# création de la table value dans la basse de donnée
try:
self.cursor.execute(''' CREATE TABLE `value` (
`v_id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`v_date` NUMERIC,
`v_ozone` REAL,
`v_temperature` REAL,
`v_groundHumidity` REAL,
`v_airHumidity` REAL,
`v_waterTemperature` REAL,
`v_waterLevel` REAL,
`v_probe` INTEGER NOT NULL,
FOREIGN KEY(`v_probe`) REFERENCES probe(`p_id`)
)''')
except dataBase.sqlite3.OperationalError:
print"Table `value` already exists."
print"Data base ready to use."
self.close()
|
python
|
class BinarTree:
    def __init__(self, e):
        self._root = Node(e)
    def add_left(self, e):
        pass
    def add_right(self, e):
        pass
    def replace(self, p, e):
        pass
    def delete(self, p):
        pass
    def attach(self, t1, t2):
        pass
class Node:
    def __init__(self, element, parent=None, left=None, right=None):
        self._element = element
        self._left = left
        self._right = right
        self._parent = parent
|
python
|
# Import libraries
import numpy as np
from flask import Flask, request, jsonify
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
app = Flask(__name__)
@app.route('/api',methods=['GET', 'POST'])
def predict():
# Get the data from the POST request.
data = request.get_json(force=True)
    # Create the VADER sentiment analyzer
    model = SentimentIntensityAnalyzer()
    # Score the submitted text with VADER
    prediction = model.polarity_scores(data['texts'])
# prediction = data['texts']
output = prediction['compound']
return jsonify(output)
if __name__ == '__main__':
app.run(port=5000, debug=True)
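# Hedged client sketch (illustrative, requires the `requests` package): posts a
# text to the /api endpoint above, once the server is running on localhost:5000,
# and returns the compound VADER score.
def example_request(text='I love this!'):
    import requests
    resp = requests.post('http://localhost:5000/api', json={'texts': text})
    return resp.json()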
|
python
|
def gcd(a, b):
while b:
t = b
b = a % b
a = t
return a
def f():
limit = 3000
print('computing GCD of all pairs of integers in [1, ' + repr(limit) + ']^2')
x = limit
while x > 0:
y = limit
while y > 0:
gcd(x, y)
# a = x
# b = y
# while b:
# t = b
# b = a % b
# a = t
y = y - 1
x = x - 1
f()
print('done')
|
python
|
import os
import sys
import random
import tensorflow as tf
import matplotlib.pyplot as plt
import skimage
from samples.sunrgbd import sun, dataset, sun_config
from mrcnn.model import log
import mrcnn.model as modellib
from mrcnn import visualize
ROOT_DIR = os.path.abspath("./")
sys.path.append(ROOT_DIR) # To find local version of the library
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
HOLOLENS_MODE = False
DEPTH_MODE = True
HOLOLENS_IMAGE_PATHS = os.path.abspath("./images")
CLASS_NAMES = ['BG']
CLASS_NAMES.extend(sun_config.CLASSES)
config = sun_config.SunConfig(depth_mode=DEPTH_MODE)
config.display()
SUN_DIR = 'C:/Users/Yannick/Downloads/SUNRGBD/'
SUN_WEIGHTS_PATH = os.path.join(
ROOT_DIR, 'logs/reduced_classes/best_models/depth_strength3_num2.h5')
# SUN_WEIGHTS_PATH = os.path.join(
# ROOT_DIR, 'logs/reduced_classes/best_models/plain_0005.h5')
IGNORE_IMAGES_PATH = os.path.abspath('../skip_image_paths.txt')
sun.ROOT_DIR = ROOT_DIR
sun_config.ROOT_DIR = ROOT_DIR
dataset.ROOT_DIR = ROOT_DIR
DEVICE = "/cpu:0" # /cpu:0 or /gpu:0
TEST_MODE = "inference"
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 0
IMAGES_PER_GPU = 1
def __init__(self, **kwargs):
super().__init__(**kwargs)
def get_ax(rows=1, cols=1, size=14):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Adjust the size attribute to control how big to render images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
def main():
assert not (
DEPTH_MODE and HOLOLENS_MODE), "No depth channel for Hololens available"
# Set up model
config = InferenceConfig(depth_mode=DEPTH_MODE)
config.BATCH_SIZE = 1
config.DETECTION_MIN_CONFIDENCE = 0.8
with tf.device(DEVICE):
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
config=config)
model.load_weights(SUN_WEIGHTS_PATH, by_name=True)
if HOLOLENS_MODE:
visualize_hololens(model)
else:
visualize_sun(model)
def visualize_hololens(model):
for image_name in os.listdir(HOLOLENS_IMAGE_PATHS):
if image_name[-4:] == '.jpg':
rgb_path = os.path.join(HOLOLENS_IMAGE_PATHS, image_name)
image = skimage.io.imread(rgb_path, plugin='pil')
results = model.detect([image], verbose=1)
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'],
r['class_ids'],
CLASS_NAMES, r['scores'],
title="Predictions")
def visualize_sun(model):
if DEPTH_MODE:
sun_dataset = dataset.SunDataset3D(skip_images_path=IGNORE_IMAGES_PATH)
else:
sun_dataset = dataset.SunDataset2D(skip_images_path=IGNORE_IMAGES_PATH)
sun_dataset.load_sun(SUN_DIR, subset="test")
# Must call before using the dataset
sun_dataset.prepare()
print("Images: {}\nClasses: {}".format(
len(sun_dataset.image_ids), sun_dataset.class_names))
test_sample_ids = [684, 1065, 854, 717, 44]
test_sample_ids.extend(sun_dataset.image_ids)
for image_id in test_sample_ids:
image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
sun_dataset, config, image_id, use_mini_mask=False)
info = sun_dataset.image_info[image_id]
print("image ID: {}.{} ({}) {}".format(info["source"], info["id"], image_id,
sun_dataset.image_reference(image_id)))
results = model.detect([image], verbose=1)
ax = get_ax(1)
r = results[0]
print(r['scores'])
if DEPTH_MODE:
image = image[:, :, :3]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
sun_dataset.class_names, r['scores'], ax=ax,
title="Predictions")
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
if __name__ == '__main__':
main()
|
python
|
import csv
import sqlite3
import os
from HoundSploit.searcher.engine.utils import check_file_existence
def create_db():
"""
Create the database used by HoundSploit and hsploit
"""
init_path = os.path.abspath(os.path.expanduser("~") + "/.HoundSploit")
db_path = os.path.abspath(init_path + "/hound_db.sqlite3")
con = sqlite3.connect(db_path)
cur = con.cursor()
cur.execute("CREATE TABLE searcher_exploit (id, file, description, date, author, type, platform, port);")
exploits_path = init_path + "/exploitdb/files_exploits.csv"
with open(exploits_path, 'r', encoding="utf8") as fin:
dr = csv.DictReader(fin)
to_db = [(i['id'], i['file'], i['description'], i['date'], i['author'], i['type'], i['platform'], i['port']) for i in dr]
cur.executemany("INSERT INTO searcher_exploit (id, file, description, date, author, type, platform, port) VALUES (?, ?, ?, ?, ?, ?, ?, ?);", to_db)
cur.execute("CREATE TABLE searcher_shellcode (id, file, description, date, author, type, platform);")
shellcodes_path = init_path + "/exploitdb/files_shellcodes.csv"
with open(shellcodes_path, 'r', encoding="utf8") as fin:
dr = csv.DictReader(fin)
to_db = [(i['id'], i['file'], i['description'], i['date'], i['author'], i['type'], i['platform']) for i in dr]
cur.executemany("INSERT INTO searcher_shellcode (id, file, description, date, author, type, platform) VALUES (?, ?, ?, ?, ?, ?, ?);", to_db)
cur.execute("CREATE TABLE searcher_suggestion (searched, suggestion, autoreplacement);")
suggestions_path = os.path.abspath(init_path + "/houndsploit/csv/files_suggestions.csv")
with open(suggestions_path, 'r', encoding="utf8") as fin:
dr = csv.DictReader(fin)
to_db = [(i['searched'], i['suggestion'], i['autoreplacement']) for i in dr]
cur.executemany("INSERT INTO searcher_suggestion (searched, suggestion, autoreplacement) VALUES (?, ?, ?);", to_db)
custom_suggestions_path = os.path.abspath(init_path + "/custom_suggestions.csv")
if check_file_existence(custom_suggestions_path):
with open(custom_suggestions_path, 'r', encoding="utf8") as fin:
dr = csv.DictReader(fin)
to_db = [(i['searched'], i['suggestion'], i['autoreplacement']) for i in dr]
cur.executemany("INSERT INTO searcher_suggestion (searched, suggestion, autoreplacement) VALUES (?, ?, ?);", to_db)
cur.execute("CREATE TABLE searcher_bookmark (vulnerability_id, vulnerability_class, date);")
custom_bookmarks_path = os.path.abspath(init_path + "/bookmarks.csv")
if check_file_existence(custom_bookmarks_path):
with open(custom_bookmarks_path, 'r', encoding="utf8") as fin:
dr = csv.DictReader(fin)
to_db = [(i['vulnerability_id'], i['vulnerability_class'], i['date']) for i in dr]
cur.executemany("INSERT INTO searcher_bookmark (vulnerability_id, vulnerability_class, date) VALUES (?, ?, ?);", to_db)
con.commit()
con.close()
try:
f = open(os.path.abspath(init_path + "/.HoundSploit/houndsploit_db.lock"))
f.close()
os.remove(os.path.abspath(init_path + "/.HoundSploit/houndsploit_db.lock"))
except IOError:
pass
|
python
|
import asyncio
import logging
import os
import sys
from argparse import SUPPRESS, ArgumentParser
from typing import Callable, Dict, Mapping
import yaml
from aioregistry import (
AsyncRegistryClient,
ChainedCredentialStore,
DockerCredentialStore,
default_credential_store,
)
from tplbuild.cmd.base_build import BaseBuildUtility
from tplbuild.cmd.base_lookup import BaseLookupUtility
from tplbuild.cmd.base_prune import BasePruneUtility
from tplbuild.cmd.build import BuildUtility
from tplbuild.cmd.publish import PublishUtility
from tplbuild.cmd.source_lookup import SourceLookupUtility
from tplbuild.cmd.source_update import SourceUpdateUtility
from tplbuild.cmd.utility import CliUtility
from tplbuild.config import UserConfig
from tplbuild.exceptions import TplBuildException
from tplbuild.tplbuild import TplBuild
LOGGER = logging.getLogger(__name__)
ALL_UTILITIES: Mapping[str, Callable[[], CliUtility]] = {
"build": BuildUtility,
"base-build": BaseBuildUtility,
"base-lookup": BaseLookupUtility,
"base-prune": BasePruneUtility,
"publish": PublishUtility,
"source-lookup": SourceLookupUtility,
"source-update": SourceUpdateUtility,
}
def create_main_parser(utilities: Mapping[str, CliUtility]) -> ArgumentParser:
"""Setup the argument parser configuration for each utility."""
parents = [
create_base_parser(),
create_config_parser(),
]
parser = ArgumentParser(
description="templated build tool",
parents=parents,
)
subparsers = parser.add_subparsers(
required=True,
dest="utility",
help="what tplbuild sub-utility to invoke",
)
for subcommand, utility in utilities.items():
utility.setup_parser(subparsers.add_parser(subcommand, parents=parents))
return parser
def create_base_parser() -> ArgumentParser:
"""
Create shared parser for basic CLI options.
"""
parser = ArgumentParser(description="Base tplbuild options", add_help=False)
parser.add_argument(
"--verbose",
"-v",
action="count",
default=SUPPRESS,
)
parser.add_argument(
"-C",
"--base-dir",
required=False,
default=SUPPRESS,
help="Base directory for tplbuild",
)
return parser
def create_config_parser() -> ArgumentParser:
"""
Create shared parser that overrides user configuration tplbuild options.
"""
parser = ArgumentParser(description="Use config options", add_help=False)
parser.add_argument(
"--auth-file",
required=False,
default=SUPPRESS,
help="Path to the container auth file",
)
parser.add_argument(
"--insecure",
required=False,
const=True,
action="store_const",
default=SUPPRESS,
help="Disable server certificate verification",
)
parser.add_argument(
"--cafile",
required=False,
default=SUPPRESS,
help="SSL context CA file",
)
parser.add_argument(
"--capath",
required=False,
default=SUPPRESS,
help="SSL context CA directory",
)
parser.add_argument(
"--load-default-certs",
required=False,
const=True,
action="store_const",
default=SUPPRESS,
help="Load system default certs always",
)
    parser.add_argument(
        "--build-jobs",
        required=False,
        type=int,
        default=SUPPRESS,
        help="Set max concurrent build jobs",
    )
    parser.add_argument(
        "--push-jobs",
        required=False,
        type=int,
        default=SUPPRESS,
        help="Set max concurrent push or pull jobs",
    )
return parser
def setup_logging(verbose: int) -> None:
"""Setup tplbuild default logging based on the verbosity level"""
internal_level, external_level = logging.WARNING, logging.CRITICAL
if verbose > 2:
internal_level, external_level = logging.DEBUG, logging.INFO
elif verbose > 1:
internal_level, external_level = logging.DEBUG, logging.WARNING
elif verbose:
internal_level, external_level = logging.INFO, logging.ERROR
tplbuild_root = logging.getLogger("tplbuild")
tplbuild_root.propagate = False
tplbuild_root.setLevel(internal_level)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(levelname)s: %(message)s"),
)
tplbuild_root.addHandler(handler)
logging.basicConfig(
format="%(levelname)s(%(module)s): %(message)s",
level=external_level,
)
def load_user_config(args) -> UserConfig:
"""Load the user config. Override with settings from args as requested."""
    user_config_locations = [
        os.path.join(args.base_dir, ".tplbuildconfig.yml"),
        os.path.expanduser("~/.tplbuildconfig.yml"),
    ]
user_config_data: Dict = {}
for user_config_path in user_config_locations:
try:
with open(user_config_path, encoding="utf-8") as fconfig:
user_config_data.update(**yaml.safe_load(fconfig))
except FileNotFoundError:
continue
except (ValueError, TypeError, yaml.YAMLError) as exc:
raise TplBuildException(f"Failed to load user config: {exc}") from exc
try:
user_config = UserConfig(**user_config_data)
except ValueError as exc:
raise TplBuildException(f"Failed to load user config: {exc}") from exc
if args.auth_file:
user_config.auth_file = args.auth_file
if args.insecure:
user_config.ssl_context.insecure = True
if args.cafile:
user_config.ssl_context.cafile = args.cafile
if args.capath:
user_config.ssl_context.capath = args.capath
if args.load_default_certs:
user_config.ssl_context.load_default_certs = True
if args.build_jobs is not None:
if args.build_jobs <= 0:
user_config.build_jobs = os.cpu_count() or 4
else:
user_config.build_jobs = args.build_jobs
    if args.push_jobs is not None:
        if args.push_jobs <= 0:
            user_config.push_jobs = os.cpu_count() or 4
        else:
            user_config.push_jobs = args.push_jobs
return user_config
def create_registry_client(user_config: UserConfig) -> AsyncRegistryClient:
"""Create an AsyncRegistryClient context from the passed arguments."""
creds = default_credential_store()
if user_config.auth_file:
try:
creds = ChainedCredentialStore(
DockerCredentialStore.from_file(user_config.auth_file),
creds,
)
except FileNotFoundError as exc:
raise TplBuildException(
f"could not open auth file {repr(user_config.auth_file)}"
) from exc
return AsyncRegistryClient(
creds=creds,
ssl_context=user_config.ssl_context.create_context(),
)
def create_tplbld(
args, user_config: UserConfig, registry_client: AsyncRegistryClient
) -> TplBuild:
"""Create a TplBuild context from the passed arguments."""
return TplBuild.from_path(
args.base_dir, user_config=user_config, registry_client=registry_client
)
def apply_default_args(args) -> None:
"""
    Apply default values to CLI arguments as needed. The normal default behavior
of argparse does not work well with parsers shared across subparsers.
"""
defaults = dict(
verbose=0,
base_dir=".",
auth_file=None,
insecure=False,
cafile=None,
capath=None,
load_default_certs=False,
build_jobs=None,
push_jobs=None,
)
for key, val in defaults.items():
setattr(args, key, getattr(args, key, val))
async def amain() -> int:
"""Parse CLI options, setup logging, then invoke the requested utility"""
utilities = {
subcommand: utility_cls() for subcommand, utility_cls in ALL_UTILITIES.items()
}
parser = create_main_parser(utilities)
args = parser.parse_args()
apply_default_args(args)
setup_logging(args.verbose)
try:
user_config = load_user_config(args)
async with create_registry_client(user_config) as registry_client:
async with create_tplbld(args, user_config, registry_client) as tplbld:
return await utilities[args.utility].main(args, tplbld)
except TplBuildException as exc:
sys.stderr.write(f"{exc}\n")
if exc.more_message:
sys.stderr.write(f"{exc.more_message}\n")
LOGGER.debug("got top level tplbuild exception", exc_info=True)
return 1
except Exception: # pylint: disable=broad-except
LOGGER.exception("Unexpected top-level exception")
return 2
def main() -> int:
"""Synchronous entry point"""
return asyncio.run(amain())
if __name__ == "__main__":
sys.exit(main())
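# Hedged sketch (illustrative, not part of tplbuild): the shape a CliUtility
# implementation takes, inferred from how utilities are used above (setup_parser()
# receives an argparse subparser, main() is awaited with the parsed args and the
# TplBuild context). The flag name below is made up for the example.
class ExampleUtility(CliUtility):
    """Minimal no-op utility used only to illustrate the interface."""
    def setup_parser(self, parser):
        parser.add_argument("--dry-run", action="store_true", default=False)
    async def main(self, args, tplbld):
        LOGGER.info("example utility invoked (dry_run=%s)", args.dry_run)
        return 0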
|
python
|
import logging
import time
import pytest
import os
from stepfunctions.workflow import Workflow
from tests import config
from tests.integration_tests.utils import VersionChecker, LineageChecker
from tests.integration_tests.workflows import simple_pipeline,\
diff_output_workflow, \
condition_workflow, \
parallel_states_workflow, \
diff_cache_param_workflow, \
long_workflow, \
complex_workflow
os.environ['AWS_DEFAULT_REGION'] = config.REGION
logging.basicConfig(level=logging.INFO)
test_data = []
test_data += simple_pipeline.SimpleWorkflow.generate_cases()
test_data += diff_output_workflow.DiffOutWorkflow.generate_cases()
test_data += condition_workflow.ConditionWorkflow.generate_cases()
test_data += parallel_states_workflow.ParallelWorkflow.generate_cases()
test_data += diff_cache_param_workflow.DiffCacheParamWorkflow.generate_cases()
test_data += long_workflow.LongWorkflow.generate_cases()
test_data += complex_workflow.ComplexWorkflow.generate_cases()
@pytest.mark.parametrize('workflow_name, definition, inputs, exp_output, bundle_names, gaps', test_data)
def test_workflow(workflow_name, definition, inputs, exp_output, bundle_names, gaps):
target_flow = [flow for flow in Workflow.list_workflows() if flow['name'] == workflow_name]
if len(target_flow) > 0:
workflow = Workflow.attach(target_flow[0]['stateMachineArn'])
workflow.update(definition=definition, role=config.EXECUTION_ROLE)
else:
workflow = Workflow(workflow_name, definition=definition, role=config.EXECUTION_ROLE)
workflow.create()
    # this sleep is vital: we must wait until the workflow update takes effect
    time.sleep(10)
    # initialize version checker before the execution to record bundle versions
checker = VersionChecker(context=config.CONTEXT, bundle_names=bundle_names)
execution = workflow.execute(inputs=inputs)
result = execution.get_output(wait=True)
assert result == exp_output, 'Returned result {} doesn\'t match expected {}'.format(result, exp_output)
# check if new bundle exists
for bd, gap in zip(bundle_names, gaps):
checker.validate_execution(bd=bd, expected_version_gap=gap)
|
python
|
#!/usr/bin/python3
# coding: utf-8
################################################################################
# Display the Ambient channel status on the Apple Pi LCD and LEDs
#
# Preparation:
# Obtain your Ambient key at https://ambidata.io and assign it to ambient_chid and ambient_rkey
#
# Copyright (c) 2018-2019 Wataru KUNINO
################################################################################
import json
import urllib.request
import datetime
from time import sleep
import ApplePi.initLCD
import ApplePi.onLED1
import ApplePi.onLED2
import ApplePi.offLED1
import ApplePi.offLED2
import subprocess
ambient_chid='0000'                     # enter the channel ID obtained from Ambient here
ambient_rkey='0123456789abcdef'         # enter the read key (NOT the write key)
amdient_tag='d1'                        # enter one of the data fields d1 to d8
ap_locate='ApplePi/locateLCD.py'
ap_print='ApplePi/printLCD.py'
ap_led1=['ApplePi/offLED1.py','ApplePi/onLED1.py']
ap_led2=['ApplePi/offLED2.py','ApplePi/onLED2.py']
while True:
    url = 'https://ambidata.io/api/v2/channels/'+ambient_chid+'/data?readKey='+ambient_rkey+'&n=1'
post = urllib.request.Request(url)
res = urllib.request.urlopen(post)
if res:
payl = json.loads(res.read().decode())
# print('Response:', payl)
if amdient_tag in payl[0]:
val = int(payl[0][amdient_tag])
date= payl[0]['created']
date = datetime.datetime.strptime(date,"%Y-%m-%dT%H:%M:%S.%fZ")
date += datetime.timedelta(hours=9)
            print(date.strftime('%Y/%m/%d %H:%M'), end='') # print the date
            print(',',val)                                  # print the received value
            # display and output to the ApplePi LCD and LEDs
subprocess.call([ap_locate,'0','0'])
subprocess.call([ap_print,date.strftime('%Y/%m/%d')[2:]])
subprocess.call([ap_locate,'0','1'])
subprocess.call([ap_print,date.strftime('%H:%M')])
subprocess.call([ap_locate,'7','1'])
subprocess.call([ap_print,str(val)])
if val <= 0:
subprocess.call([ap_led1[0]])
subprocess.call([ap_led2[0]])
elif val == 1:
subprocess.call([ap_led1[0]])
subprocess.call([ap_led2[1]])
elif val >= 2:
subprocess.call([ap_led1[1]])
subprocess.call([ap_led2[1]])
sleep(20)
|
python
|
import numpy as np
signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
fourier = np.fft.fft(signal)
print(fourier)
print(signal.size)
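# With sample spacing d=0.1 s, the bins below are multiples of 1/(n*d) = 1.25 Hz,
# up to the Nyquist frequency 1/(2*d) = 5 Hz.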
freq = np.fft.fftfreq(signal.size, d=0.1)
print(freq)
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-07 22:39
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mars', '0010_teammember_role'),
]
operations = [
migrations.AlterModelOptions(
name='teammember',
options={'ordering': ['position', 'name'], 'verbose_name': 'Team Member', 'verbose_name_plural': 'Team Members'},
),
]
|
python
|
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
OEMC integrator for hybrid MC/MD simulations
DESCRIPTION
This module provides OEMC integrators for OpenMM.
EXAMPLES
COPYRIGHT
@author Hyuntae Jung <[email protected]>
All code in this repository is released under the MIT License.
This program is free software: you can redistribute it and/or modify it under
the terms of the MIT License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the MIT License for more details.
You should have received a copy of the MIT License along with this program.
"""
# ============================================================================================
# GLOBAL IMPORTS
# ============================================================================================
import logging
import simtk.unit as unit
import simtk.openmm as mm
from openmm_oemc.constants import *  # noqa: F401,F403 (the specific names to import are unspecified here)
logger = logging.getLogger(__name__)
# Energy unit used by OpenMM unit system
_OPENMM_ENERGY_UNIT = unit.kilojoules_per_mole
# ============================================================================================
# BASE CLASSES
# ============================================================================================
if __name__ == '__main__':
import doctest
doctest.testmod()
|
python
|
import discord
import random
from discord.ext import commands
from gameConfig import *
import os
coin = [0, 1]
@bot.command()
async def coinflip(ctx):
embed = discord.Embed(
title = "Coinflip💰",
description = 'React with the emoji below to choose heads or tails \nHeads: 💸 \nTails: 🦨',
color = discord.Color.gold(),
)
await ctx.channel.purge(limit=5)
msg = await ctx.send(embed=embed)
await msg.add_reaction('💸')
await msg.add_reaction('🦨')
def checkReaction(reaction, user):
return user != bot.user and (str(reaction.emoji) == '💸' or str(reaction.emoji) == '🦨')
reaction, user = await bot.wait_for("reaction_add", check = checkReaction)
    # Heads roll
    if str(reaction.emoji) == '💸':
        await ctx.send("You chose *Heads*, good luck! Flipping....")
        flip = random.choice(coin)  # flip once and reuse the result for both outcomes
        if flip == 0:
            await ctx.send("Coin lands on **Heads**")
            await ctx.send("Congrats! You win!!🏆 (Use **-reset** to leave game)")
        else:
            await ctx.send("Coin lands on **Tails**")
            await ctx.send("Sorry, you lost💩 (Use **-reset** to leave game)")
    # Tails roll
    elif str(reaction.emoji) == '🦨':
        await ctx.send("You chose *Tails*, good luck! Flipping....")
        flip = random.choice(coin)  # flip once and reuse the result for both outcomes
        if flip == 1:
            await ctx.send("Coin lands on **Tails**")
            await ctx.send("Congrats! You win!!🏆 (Use **-reset** to leave game)")
        else:
            await ctx.send("Coin lands on **Heads**")
            await ctx.send("Sorry, you lost💩 (Use **-reset** to leave game)")
@bot.command()
async def reset(ctx):
await ctx.send("Resetting files...use **-menu** for the game menu")
os.system('python startup.py')
pass
bot.run(TOKEN, bot=True, reconnect=True)
|
python
|
# -*- coding: utf-8 -*-
# cython: language_level=3
# Tanjun Examples - A collection of examples for Tanjun.
# Written in 2021 by Lucina [email protected]
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain worldwide.
# This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along with this software.
# If not, see <https://creativecommons.org/publicdomain/zero/1.0/>.
"""Example of how to run a standard Tanjun client instance with a RESTBot."""
import asyncio
import hikari
import tanjun
from examples import config
from examples import impls
from examples import protos
async def run() -> None:
loaded_config = config.ExampleConfig.load()
# While a BOT token is assumed in this example, a client credentials OAuth2
# token can also be used with Tanjun but this may limit functionality.
bot = hikari.RESTBot(loaded_config.bot_token, hikari.TokenType.BOT)
database = impls.DatabaseImpl()
client = (
# Passing True for declare_global_commands here instructs the client to
# declare the slash commands within it which are marked as "global" during
# the first startup.
# A guild ID may also be passed here to instruct it to just declare the
# global commands for that guild, this can be helpful for debug purposes.
tanjun.Client.from_rest_bot(bot, declare_global_commands=True)
# Unlike a gateway bot bound client, only slash commands will be automatically
# executed by a client that's bound to a rest bot.
.load_modules("examples.slash_component")
.set_type_dependency(config.ExampleConfig, loaded_config)
.set_type_dependency(protos.DatabaseProto, database)
# Here we use client callbacks to manage the database, STOPPING can also be used to stop it.
.add_client_callback(tanjun.ClientCallbackNames.STARTING, database.connect)
)
# Unlike with a gateway bot, for RESTBots hikari has no lifetime event
# dispatch which can be used to implicitly startup and close the Tanjun
# client. Instead, we must directly startup and close Tanjun.
await bot.start()
# Note that starting a Tanjun client before the relevant bot may lead
# to erroneous behaviour as it won't be able to make any requests.
# While this example uses the client as a context manager to implicitly start
# and close it, the `open` and `close` methods can alternatively be used to the same effect.
async with client:
await bot.join() # This waits until the bot is closed before closing Tanjun by exiting the context manager.
def main():
asyncio.run(run())
if __name__ == "__main__":
main()
|
python
|
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import tensorflow as tf
import keras.backend as K
from keras.utils import to_categorical
from keras import metrics
from keras.models import Model, load_model
from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout,Maximum
from keras.layers.core import Lambda, RepeatVector, Reshape
from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D
from keras.layers.merge import concatenate, add
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from sklearn.utils import class_weight
from keras.callbacks import ModelCheckpoint
from keras.callbacks import CSVLogger
from keras.callbacks import EarlyStopping
import os
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
# from medpy.io import load
import numpy as np
#import cv2
import nibabel as nib
from PIL import Image
def conv_block(input_mat,num_filters,kernel_size,batch_norm):
X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat)
if batch_norm:
X = BatchNormalization()(X)
X = Activation('relu')(X)
X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X)
if batch_norm:
X = BatchNormalization()(X)
X = Activation('relu')(X)
return X
def Unet_3d(input_img, n_filters = 8, dropout = 0.2, batch_norm = True):
c1 = conv_block(input_img,n_filters,3,batch_norm)
p1 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(c1)
p1 = Dropout(dropout)(p1)
c2 = conv_block(p1,n_filters*2,3,batch_norm);
p2 = MaxPooling3D(pool_size=(2,2,2) ,strides=2)(c2)
p2 = Dropout(dropout)(p2)
c3 = conv_block(p2,n_filters*4,3,batch_norm);
p3 = MaxPooling3D(pool_size=(2,2,2) ,strides=2)(c3)
p3 = Dropout(dropout)(p3)
c4 = conv_block(p3,n_filters*8,3,batch_norm);
p4 = MaxPooling3D(pool_size=(2,2,2) ,strides=2)(c4)
p4 = Dropout(dropout)(p4)
c5 = conv_block(p4,n_filters*16,3,batch_norm);
u6 = Conv3DTranspose(n_filters*8, (3,3,3), strides=(2, 2, 2), padding='same')(c5);
u6 = concatenate([u6,c4]);
c6 = conv_block(u6,n_filters*8,3,batch_norm)
c6 = Dropout(dropout)(c6)
u7 = Conv3DTranspose(n_filters*4,(3,3,3),strides = (2,2,2) , padding= 'same')(c6);
u7 = concatenate([u7,c3]);
c7 = conv_block(u7,n_filters*4,3,batch_norm)
c7 = Dropout(dropout)(c7)
u8 = Conv3DTranspose(n_filters*2,(3,3,3),strides = (2,2,2) , padding='same')(c7);
u8 = concatenate([u8,c2]);
c8 = conv_block(u8,n_filters*2,3,batch_norm)
c8 = Dropout(dropout)(c8)
u9 = Conv3DTranspose(n_filters,(3,3,3),strides = (2,2,2) , padding='same')(c8);
u9 = concatenate([u9,c1]);
c9 = conv_block(u9,n_filters,3,batch_norm)
outputs = Conv3D(4, (1, 1,1), activation='softmax')(c9)
print("!!!!!!!!!!!!!!!!!!!")
print(outputs.shape)
model = Model(inputs=input_img, outputs=outputs)
return model
def standardize(image):
standardized_image = np.zeros(image.shape)
#
# iterate over the `z` dimension
for z in range(image.shape[2]):
# get a slice of the image
# at channel c and z-th dimension `z`
image_slice = image[:,:,z]
# subtract the mean from image_slice
centered = image_slice - np.mean(image_slice)
# divide by the standard deviation (only if it is different from zero)
if(np.std(centered)!=0):
centered = centered/np.std(centered)
# update the slice of standardized image
# with the scaled centered and scaled image
standardized_image[:, :, z] = centered
return standardized_image
def dice_coef(y_true, y_pred, epsilon=0.00001):
"""
Dice = (2*|X & Y|)/ (|X|+ |Y|)
= 2*sum(|A*B|)/(sum(A^2)+sum(B^2))
ref: https://arxiv.org/pdf/1606.04797v1.pdf
"""
axis = (0,1,2,3)
dice_numerator = 2. * K.sum(y_true * y_pred, axis=axis) + epsilon
dice_denominator = K.sum(y_true*y_true, axis=axis) + K.sum(y_pred*y_pred, axis=axis) + epsilon
return K.mean((dice_numerator)/(dice_denominator))
def dice_coef_loss(y_true, y_pred):
return 1-dice_coef(y_true, y_pred)
input_img = Input((128,128,128,4))
model = Unet_3d(input_img,8,0.1,True)
learning_rate = 0.001
epochs = 5000
decay_rate = 0.0000001
model.compile(optimizer=Adam(lr=learning_rate, decay = decay_rate), loss=dice_coef_loss, metrics=[dice_coef])
model.summary()
path = '../input/vs-brats2018/miccai_brats_2018_data_training/HGG'
all_images = os.listdir(path)
#print(len(all_images))
all_images.sort()
data = np.zeros((240,240,155,4))
image_data2=np.zeros((240,240,155))
for epochs in range(60):
for image_num in range(180):
# data preprocessing starts here
x = all_images[image_num]
print(x)
folder_path = path + '/' + x;
modalities = os.listdir(folder_path)
modalities.sort()
#data = []
w = 0
for j in range(len(modalities)-1):
#print(modalities[j])
image_path = folder_path + '/' + modalities[j]
            if image_path.endswith('seg.nii'):
img = nib.load(image_path);
image_data2 = img.get_data()
image_data2 = np.asarray(image_data2)
print("Entered ground truth")
else:
img = nib.load(image_path);
image_data = img.get_data()
image_data = np.asarray(image_data)
image_data = standardize(image_data)
data[:,:,:,w] = image_data
print("Entered modality")
w = w+1
print(data.shape)
print(image_data2.shape)
reshaped_data=data[56:184,80:208,13:141,:]
reshaped_image_data2=image_data2[56:184,80:208,13:141]
for v in range(128):
print("x")
plt.imshow(reshaped_data[:,:,v,0])
plt.show(block=False)
plt.pause(1)
plt.close()
print("y")
imgplot = plt.imshow(reshaped_image_data2[:,:,v])
plt.show(block=False)
plt.pause(1)
plt.close()
print("new")
reshaped_data=reshaped_data.reshape(1,128,128,128,4)
reshaped_image_data2=reshaped_image_data2.reshape(1,128,128,128)
reshaped_image_data2[reshaped_image_data2==4] = 3
hello = reshaped_image_data2.flatten()
#y_to = keras.utils.to_categorical(y_to,num_classes=2)
print(reshaped_image_data2.shape)
#print(hello[hello==3].shape)
print("Number of classes",np.unique(hello))
class_weights = class_weight.compute_class_weight('balanced',np.unique(hello),hello)
print(class_weights)
reshaped_image_data2 = to_categorical(reshaped_image_data2, num_classes = 4)
print(reshaped_data.shape)
print(reshaped_image_data2.shape)
print(type(reshaped_data))
model.fit(x=reshaped_data,y=reshaped_image_data2, epochs = 1 , class_weight = class_weights)
model.save('../working/3d_model.h5')
model.save('../working/3d_model.h5')
|
python
|
"""
Convert the prepared ResNet-50/MobileNet Keras models into TFLite models.
Run this script from the server root.
"""
import argparse
import os
import tensorflow as tf
from models import mobilenet, resnet50
from tensorflow import lite as tf_lite
TFLITE_MODEL_DIR = '../client/app/src/main/assets'
def convert_resnet() -> None:
CHECKPOINT_DIR = './checkpoints/resnet50'
checkpoint = tf.train.latest_checkpoint(CHECKPOINT_DIR)
model = resnet50(1001)
model.load_weights(checkpoint)
converter = tf_lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
if not os.path.exists(TFLITE_MODEL_DIR):
os.mkdir(TFLITE_MODEL_DIR)
with open(os.path.join(TFLITE_MODEL_DIR, 'resnet50.tflite'), 'wb') as f:
f.write(tflite_model)
def convert_mobilenet() -> None:
# Create tf model
CHECKPOINT_PATH = './checkpoints/mobilenet_v1_1.0_224/mobilenet_1_0_224_tf.h5'
model = mobilenet()
model.load_weights(CHECKPOINT_PATH)
# Convert to tflite
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
if not os.path.exists(TFLITE_MODEL_DIR):
os.mkdir(TFLITE_MODEL_DIR)
# Save tflite model
with open(os.path.join(TFLITE_MODEL_DIR, 'mobilenet_v1.tflite'), 'wb') as f:
f.write(tflite_model)
def main(args) -> None:
if args.model == 'all':
convert_resnet()
convert_mobilenet()
elif args.model == 'resnet50':
convert_resnet()
elif args.model == 'mobilenet':
convert_mobilenet()
else:
raise ValueError("Not supported model: {}".format(args.model))
if __name__ == "__main__":
os.environ['CUDA_VISIBLE_DEVICES'] = ''
    parser = argparse.ArgumentParser(description='Convert trained models to TFLite format.')
parser.add_argument('-m', '--model', default='all',
help='model to convert')
args = parser.parse_args()
main(args)
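    # Example invocations (sketch; the file name "convert.py" is assumed):
    #   python convert.py                 # convert both models
    #   python convert.py -m resnet50     # convert only the ResNet-50 checkpoint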
|
python
|
# Copyright 1996-2018 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test textures."""
import unittest
import os
import fnmatch
from PIL import Image
class TestTextures(unittest.TestCase):
"""Unit test of the textures."""
def setUp(self):
"""Get all the textures to be tested."""
# 1. Get all the images from projects and resources
images = []
for directory in ['projects', 'resources']:
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME'] + os.sep + directory):
for fileName in fnmatch.filter(fileNames, '*.png'):
image = os.path.join(rootPath, fileName)
images.append(image)
for fileName in fnmatch.filter(fileNames, '*.jpg'):
image = os.path.join(rootPath, fileName)
images.append(image)
# 2. filter-out the images which are not textures
self.textures = []
for image in images:
if not (
'controllers' in image or
'icons' in image or
'libraries' in image or
'plugins' in image or
'simulator-sdk' in image or
'resources' + os.sep + 'images' in image or
'resources' + os.sep + 'web' in image or
'resources' + os.sep + 'wren' in image
):
self.textures.append(image)
def test_textures_dimensions_are_power_of_two(self):
"""Test that the released textures dimensions are power of two."""
def is_perfect_power_of_two(a):
assert isinstance(a, int)
while a % 2 == 0:
a = a / 2
if a == 1:
return True
return False
for texture in self.textures:
im = Image.open(texture)
self.assertTrue(
is_perfect_power_of_two(im.size[0]) and is_perfect_power_of_two(im.size[1]),
msg='texture "%s": dimension is not a power of two: (%d, %d)' % (texture, im.size[0], im.size[1])
)
def test_textures_profile(self):
"""Test that the released textures don't contain an ICC profile."""
for texture in self.textures:
im = Image.open(texture)
self.assertTrue(
im.info.get("icc_profile") is None,
msg='texture "%s" contains an ICC profile' % (texture)
)
if __name__ == '__main__':
unittest.main()
|
python
|
from __future__ import print_function
import sys
import os
import runpy
import traceback
import contextlib
import argparse
from tracestack.handler import ExceptionHandler
from tracestack.console import TracestackConsole
def run():
"""Runs the script provided by the arguments, using the
tracestack exception handler.
"""
parser = _build_parser()
args = vars(parser.parse_args())
print(args)
script = args.pop("script")
handler = ExceptionHandler(**args)
if script:
# set up the system variables
sys.argv = sys.argv[1:]
sys.path[0] = os.path.dirname(os.path.abspath(script))
try:
runpy.run_path(script, run_name="__main__")
except:
einfo = sys.exc_info()
_print_clean_traceback(einfo)
handler.handle_error(*einfo)
else:
# no additional arguments were given; run the REPL
console = TracestackConsole(**args)
console.interact()
def _build_parser():
"""Returns the argument parser (which is built using argparse)."""
parser = argparse.ArgumentParser(description='instantly search your ' + \
'Python error messages on' + \
' the web',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('script', metavar='SCRIPT', type=str, nargs='?',
help='the Python script')
parser.add_argument('arguments', metavar='ARGUMENTS', type=str, nargs='*',
help='any arguments to the script')
parser.add_argument('-p', '--prompt', help='prompt the user rather than immediately searching',
action='store_true')
parser.add_argument('-e',
'--engine',
help="""the search engine to use:
'default': Google search limited to stackoverflow.com
'google': Google search of the full web
'stackoverflow': StackOverflow site search""",
default="default",
choices=['default', 'google', 'stackoverflow'],
metavar="ENGINE",
type=str)
return parser
def _print_clean_traceback(einfo):
"""Print the traceback, without showing all the overhead added by tracestack."""
extracted = traceback.extract_tb(einfo[2])
if extracted[-1][0] in ('trace', 'runpy.py'):
# the error call is coming from inside the house
# this shouldn't happen, but if it does, do the default behavior
        sys.__excepthook__(*einfo)
else:
# remove the traceback levels that relate to runpy or trace
extracted = [level for level in
extracted if
os.path.basename(level[0]) not in ('command_line.py', 'runpy.py')]
extracted = traceback.format_list(extracted)
# print as if it were a normal traceback
print("Traceback (most recent call last):", file=sys.stderr)
for level in extracted:
print(level, end="", file=sys.stderr)
for line in traceback.format_exception_only(einfo[0], einfo[1]):
print(line, end="", file=sys.stderr)
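
# Example invocations (sketch; the console-script name "tracestack" and the file
# name "myscript.py" are assumed for illustration):
#   tracestack myscript.py arg1 arg2     # run a script under the tracestack excepthook
#   tracestack                           # no script given: drop into the tracestack REPL
#   tracestack -p -e google myscript.py  # prompt before searching, using full Google search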
|
python
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
# pylint: disable=invalid-name
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
REPO_DIR = os.path.dirname(BASE_DIR)
# Load environment variables from .env
env = environ.Env()
env_file = os.path.join(REPO_DIR, ".env")
if os.path.exists(env_file):
environ.Env.read_env(str(env_file))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DJANGO_DEBUG", False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=[])
STELLAR_ACCOUNT_ADDRESS = env("STELLAR_ACCOUNT_ADDRESS")
STELLAR_ACCOUNT_SEED = env("STELLAR_ACCOUNT_SEED")
STELLAR_NETWORK = env("STELLAR_NETWORK", default="TESTNET")
HORIZON_URI = env("HORIZON_URI", default="https://horizon-testnet.stellar.org/")
REDIS_URL = env("REDIS_URL", default=None)
SERVER_JWT_KEY = env("SERVER_JWT_KEY")
OPERATION_DEPOSIT = "deposit"
OPERATION_WITHDRAWAL = "withdraw"
ACCOUNT_STARTING_BALANCE = str(2.01)
WITHDRAW_AUTH_REQUIRED = env("WITHDRAW_AUTH_REQUIRED", default=False)
DEPOSIT_AUTH_REQUIRED = env("DEPOSIT_AUTH_REQUIRED", default=False)
FEE_AUTH_REQUIRED = env("FEE_AUTH_REQUIRED", default=False)
TRANSACTIONS_AUTH_REQUIRED = env("TRANSACTIONS_AUTH_REQUIRED", default=False)
TRANSACTION_AUTH_REQUIRED = env("TRANSACTION_AUTH_REQUIRED", default=False)
# Application definition
DJANGO_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
THIRD_PARTY_APPS = ["rest_framework", "corsheaders"]
CUSTOM_APPS = ["info", "transaction"]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + CUSTOM_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
APPEND_SLASH = False
ROOT_URLCONF = "app.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "app.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": env.db(
"DATABASE_URL", default="sqlite:///" + os.path.join(REPO_DIR, "db.sqlite3")
)
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATIC_URL = "/static/"
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Django Rest Framework Settings:
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 10,
}
# API Config
DEFAULT_PAGE_SIZE = 10
# Logging config
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": (
"%(asctime)s [%(process)d] [%(levelname)s] "
+ "pathname=%(pathname)s lineno=%(lineno)s "
+ "funcname=%(funcName)s %(message)s"
),
"datefmt": "%Y-%m-%d %H:%M:%S",
},
"simple": {"format": "%(levelname)s %(message)s"},
},
"handlers": {
"null": {"level": "DEBUG", "class": "logging.NullHandler"},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"loggers": {"testlogger": {"handlers": ["console"], "level": "INFO"}},
}
# CORS configuration
CORS_ORIGIN_ALLOW_ALL = True
# Celery config
CELERY_BROKER_URL = REDIS_URL
CELERY_RESULT_BACKEND = REDIS_URL
CELERY_ACCEPT_CONTENT = ["application/json"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
|
python
|
"""
Tyk API Management.
"""
from diagrams import Node
class _Tyk(Node):
_provider = "tyk"
_icon_dir = "resources/tyk"
fontcolor = "#2d3436"
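
# Sketch (assumption): concrete resources subclass _Tyk and point _icon at a file
# under resources/tyk, mirroring how other providers in the diagrams package are organized.
class _ExampleGateway(_Tyk):
    _icon = "example-gateway.png"  # illustrative icon file name, not a real asset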
|
python
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from h2o.utils.compatibility import * # NOQA
from .model_base import ModelBase
from .metrics_base import * # NOQA
import h2o
from h2o.expr import ExprNode
class H2OWordEmbeddingModel(ModelBase):
"""
Word embedding model.
"""
def find_synonyms(self, word, count=20):
"""
Find synonyms using a word2vec model.
:param str word: A single word to find synonyms for.
:param int count: The first "count" synonyms will be returned.
        :returns: an OrderedDict of synonyms mapped to their similarity scores, sorted by score in descending order.
"""
j = h2o.api("GET /3/Word2VecSynonyms", data={'model': self.model_id, 'word': word, 'count': count})
return OrderedDict(sorted(zip(j['synonyms'], j['scores']), key=lambda t: t[1], reverse=True))
def transform(self, words, aggregate_method):
"""
Transform words (or sequences of words) to vectors using a word2vec model.
:param str words: An H2OFrame made of a single column containing source words.
:param str aggregate_method: Specifies how to aggregate sequences of words. If method is `NONE`
then no aggregation is performed and each input word is mapped to a single word-vector.
If method is 'AVERAGE' then input is treated as sequences of words delimited by NA.
Each word of a sequences is internally mapped to a vector and vectors belonging to
the same sentence are averaged and returned in the result.
        :returns: an H2OFrame containing one aggregated word vector per input word or sequence.
"""
j = h2o.api("GET /3/Word2VecTransform", data={'model': self.model_id, 'words_frame': words.frame_id, 'aggregate_method': aggregate_method})
return h2o.get_frame(j["vectors_frame"]["name"])
def to_frame(self):
"""
Converts a given word2vec model into H2OFrame.
:returns: a frame representing learned word embeddings.
"""
return h2o.H2OFrame._expr(expr=ExprNode("word2vec.to.frame", self))
|
python
|
from pymongo import MongoClient
def mongo_client():
return MongoClient("database", 27017)
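
# Minimal usage sketch (assumption: the hostname "database" resolves to a MongoDB
# instance, e.g. a docker-compose service of that name). Construction is lazy, so
# nothing is contacted until a real operation runs.
if __name__ == "__main__":
    client = mongo_client()
    print(client)  # prints the MongoClient configuration only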
|
python
|
from msgpack import Packer
COMMAND_SET_VERSION = 3
class CommandType:
JumpToMain = 1
CRCRegion = 2
Erase = 3
Write = 4
Ping = 5
Read = 6
UpdateConfig = 7
SaveConfig = 8
ReadConfig = 9
GetStatus = 10
def encode_command(command_code, *arguments):
"""
Encodes a command of the given type with given arguments.
"""
p = Packer(use_bin_type=True)
obj = list(arguments)
return p.pack(COMMAND_SET_VERSION) + p.pack(command_code) + p.pack(obj)
def encode_crc_region(address, length):
"""
Encodes the command to request the CRC of a region in flash.
"""
return encode_command(CommandType.CRCRegion, address, length)
def encode_erase_flash_page(address, device_class):
"""
Encodes the command to erase the flash page at given address.
"""
return encode_command(CommandType.Erase, address, device_class)
def encode_write_flash(data, address, device_class):
"""
Encodes the command to write the given data at the given address in a
messagepack byte object.
"""
return encode_command(CommandType.Write, address, device_class, data)
def encode_read_flash(address, length):
"""
Encodes the command to read the flash at given address.
"""
return encode_command(CommandType.Read, address, length)
def encode_update_config(data):
"""
Encodes the command to update the config from given MessagePack data.
"""
return encode_command(CommandType.UpdateConfig, data)
def encode_save_config():
"""
Encodes the command to save the config to flash.
"""
return encode_command(CommandType.SaveConfig)
def encode_jump_to_main():
"""
Encodes the command to jump to application using MessagePack.
"""
return encode_command(CommandType.JumpToMain)
def encode_read_config():
"""
Encodes the read config command.
"""
return encode_command(CommandType.ReadConfig)
def encode_ping():
"""
Encodes a ping command.
"""
return encode_command(CommandType.Ping)
def encode_get_status():
"""
Encodes a get status command.
"""
return encode_command(CommandType.GetStatus)
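
# Minimal usage sketch (not part of the original module): the address, length and
# device class below are illustrative placeholders, not values for a real target.
if __name__ == "__main__":
    ping_frame = encode_ping()                                # version + command code + empty argument list
    crc_frame = encode_crc_region(0x0000, 256)                # request the CRC of a 256-byte region
    write_frame = encode_write_flash(b"\x00" * 4, 0x0000, 1)  # write four zero bytes for device class 1
    print(len(ping_frame), len(crc_frame), len(write_frame))  # each frame is a packed MessagePack byte string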
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from utility.webdl import WebDLUtility
from service.webdl import WebDLService
from postprocessing.tools import plot_utility_model, utility_grid, save_model
from postprocessing.cfg import *
#%%
df = pd.read_csv("service/data/webdl.csv")
X = df[['cfg_client_maxrate_kbit', 'cfg_delay']]
y = df.webdlc_median
service_model = WebDLService(X, y)
# Unscaled
utility_model = WebDLUtility()
tp,delay,utility = utility_grid(service_model, utility_model,
discrete_delay_cnt=glb_discrete_delay_cnt,
discrete_tp_cnt=glb_discrete_tp_cnt,
max_delay=service_model.max_delay,
min_delay=service_model.min_delay,
min_tp=glb_min_tp,
max_tp=service_model.max_tp)
plot_utility_model(tp, delay, utility)
save_model("postprocessing/models_unscaled/", "webdl", tp, delay, utility)
# Scaled
utility_model = WebDLUtility(scaled=True)
tp,delay,utility = utility_grid(service_model, utility_model,
discrete_delay_cnt=glb_discrete_delay_cnt,
discrete_tp_cnt=glb_discrete_tp_cnt,
max_delay=service_model.max_delay,
min_delay=service_model.min_delay,
min_tp=glb_min_tp,
max_tp=service_model.max_tp)
plot_utility_model(tp, delay, utility)
save_model("postprocessing/models/", "webdl", tp, delay, utility)
|
python
|
import unittest
import shutil
import SimpleITK as sitk
import numpy as np
from typing import Union, Sequence
from mnts.scripts.dicom2nii import *
from mnts.scripts.normalization import *
from mnts.filters import MNTSFilterGraph
from mnts.filters.intensity import *
from mnts.filters.geom import *
from pathlib import Path
def create_graph() -> MNTSFilterGraph:
r"""Create the normalization graph"""
G = MNTSFilterGraph()
# Add filter nodes to the graph.
G.add_node(SpatialNorm(out_spacing=[0.4492, 0.4492, 0]))
G.add_node(HuangThresholding(closing_kernel_size=10), 0, is_exit=True) # Use mask to better match the histograms
G.add_node(N4ITKBiasFieldCorrection(), [0, 1])
G.add_node(NyulNormalizer(), [2, 1], is_exit=True)
return G
def create_random_boxes(size: Sequence[int], box_size: Sequence[int], intensity: int):
r"""Create an sitk image of size with a random box placed within the image"""
x = np.zeros(size)
corner = [np.random.randint(0, size[i] - box_size[i]) for i in range(len(size))]
s = tuple([slice(corner[i], corner[i] + box_size[i], 1) for i in range(len(size))])
x[s] = intensity
return sitk.GetImageFromArray(x)
"""
Test settings
"""
N = 3 # create 3 images
out_path = Path('./temp_output')
fnames = [f"_temp{i}.nii.gz" for i in range(N)]
test_yaml =\
r"""
SpatialNorm:
out_spacing: [0.5, 0.5, 0]
HuangThresholding:
closing_kernel_size: 10
_ext:
upstream: 0
is_exit: True
N4ITKBiasFieldCorrection:
_ext:
upstream: [0, 1]
NyulNormalizer:
_ext:
upstream: [2, 1]
is_exit: True
"""
class TestScript(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestScript, self).__init__(*args, **kwargs)
TestScript.create_samples()
def test_norm_train(self):
# Create graph
G = create_graph()
G._logger.set_verbose(1)
_train_normalization(G, '.', str(out_path), 0)
def test_norm_train_mpi(self):
# Create graph
G = create_graph()
G._logger.set_verbose(1)
_train_normalization(G, '.', str(out_path), 16)
def test_norm_inference(self):
G = create_graph()
G._logger.set_verbose(1)
_inference_normalization(G, str(out_path.joinpath("Trained_states")), ".", str(out_path), 0)
def test_norm_inference_mpi(self):
G = create_graph()
G._logger.set_verbose(1)
_inference_normalization(G, str(out_path.joinpath("Trained_states")), ".", str(out_path), 16)
def test_console_entry_train(self):
r"""Run this after """
with open('_temp.yaml', 'w') as f:
f.write(test_yaml)
run_graph_train(f"-i . -f ./_temp.yaml -n 16 -v -o {str(out_path)}".split())
Path('_temp.yaml').unlink()
@staticmethod
def create_samples():
x = [create_random_boxes([128, 128, 30], [64, 64, 20], 255) for i in range(N)]
[sitk.WriteImage(sitk.Cast(xx, sitk.sitkInt16), fnames[i]) for i, xx in enumerate(x)]
@staticmethod
def clean_dir():
# Delete temp images and generated files
[Path(f).unlink() for f in fnames]
Path("./default.log").unlink()
shutil.rmtree(str(out_path))
def __del__(self):
TestScript.clean_dir()
if __name__ == '__main__':
unittest.main()
|
python
|
# -*- coding: utf-8 -*-
from click.testing import CliRunner
from simmate.command_line.workflows import workflows
def test_database():
# make the dummy terminal
runner = CliRunner()
# list the workflows
result = runner.invoke(workflows, ["list-all"])
assert result.exit_code == 0
# list the config for one workflow
result = runner.invoke(workflows, ["show-config", "energy_mit"])
assert result.exit_code == 0
# How will I mock the testing of VASP? It will require the database to be configured.
# Also I need Structure fixtures.
# TODO: test setup_only, run, run_cloud
|
python
|
# Copyright 2017 The BerryDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
BASE_FLAGS = [
'-Wall',
'-Wextra',
'-Werror',
'-DUSE_CLANG_COMPLETER', # YCM needs this.
'-xc++', # YCM needs this to avoid compiling headers as C code.
]
SOURCE_EXTENSIONS = [ '.cc' ]
HEADER_EXTENSIONS = [ '.h' ]
SOURCE_DIRECTORIES = [ 'src' ]
HEADER_DIRECTORIES = [ 'include', 'platform' ]
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
def IsHeaderFile( filename ):
extension = os.path.splitext(filename)[1]
return extension in HEADER_EXTENSIONS
def MakeRelativePathsInFlagsAbsolute(flags, build_root):
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith('/'):
new_flag = os.path.join(build_root, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[ len(path_flag): ]
new_flag = path_flag + os.path.join(build_root, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
def FindNearest(path, target, build_root):
candidate = os.path.join(path, target)
if os.path.isfile(candidate):
return candidate
if path == build_root:
return None
parent = os.path.dirname(path)
if parent == path:
return None
return FindNearest(parent, target, build_root)
def FlagsForClangComplete(file_path, build_root):
clang_complete_path = FindNearest(file_path, '.clang_complete', build_root)
clang_complete_flags = open(clang_complete_path, 'r').read().splitlines()
return clang_complete_flags
def FlagsForFile(filename, **kwargs):
build_root = DirectoryOfThisScript()
file_path = os.path.realpath(filename)
flags = BASE_FLAGS
clang_flags = FlagsForClangComplete(file_path, build_root)
if clang_flags:
flags = flags + clang_flags
final_flags = MakeRelativePathsInFlagsAbsolute(flags, build_root)
return { 'flags': final_flags }
|
python
|
# -*- coding: utf-8 -*-
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ClientFactory, ServerFactory
from twisted.internet import reactor
from twisted.protocols.basic import LineReceiver
from Screens.MessageBox import MessageBox
from Tools import Notifications
from GrowleeConnection import emergencyDisable
from . import NOTIFICATIONID
SNP_TCP_PORT = 9887
class SnarlNetworkProtocol(LineReceiver):
def __init__(self, client=False):
self.client = client
def connectionMade(self):
self.factory.addClient(self)
if self.client:
payload = "type=SNP#?version=1.0#?action=register#?app=growlee"
self.sendLine(payload)
payload = "type=SNP#?version=1.0#?action=add_class#?app=growlee#?class=growleeClass#?title=Notifications from your Dreambox"
self.sendLine(payload)
def connectionLost(self, reason):
self.factory.removeClient(self)
def stop(self):
if self.client:
payload = "type=SNP#?version=1.0#?action=unregister#?app=growlee"
self.sendLine(payload)
self.transport.loseConnection()
def sendNotification(self, title='No title.', description='No message.', timeout=1):
if not self.client or not self.transport:
return
payload = "type=SNP#?version=1.0#?action=notification#?app=growlee#?class=growleeClass#?title=%s#?text=%s#?timeout=%d" % (title, description, timeout)
self.sendLine(payload)
def lineReceived(self, data):
if self.client or not self.transport:
return
Len = len(data)
if Len < 23 or not data[:23] == "type=SNP#?version=1.0#?":
return
items = data[23:].split('#?')
title = ''
description = ''
timeout = 5
for item in items:
key, value = item.split('=')
if key == "action":
if value == "unregister":
payload = "SNP/1.0/0/OK"
self.sendLine(payload)
self.transport.loseConnection()
return
elif value != "notification":
# NOTE: we pretend to handle&accept pretty much everything one throws at us
payload = "SNP/1.0/0/OK"
self.sendLine(payload)
return
elif key == "title":
title = value
elif key == "text":
description = value
elif key == "timeout":
timeout = int(value)
Notifications.AddNotificationWithID(
NOTIFICATIONID,
MessageBox,
text=title + '\n' + description,
type=MessageBox.TYPE_INFO,
timeout=timeout,
close_on_any_key=True,
)
# return ok
payload = "SNP/1.0/0/OK"
self.sendLine(payload)
class SnarlNetworkProtocolClientFactory(ClientFactory):
client = None
def buildProtocol(self, addr):
p = SnarlNetworkProtocol(client=True)
p.factory = self
return p
def sendNotification(self, title='No title.', description='No message.', priority=0, timeout=-1):
if self.client:
title = title.decode('utf-8', 'ignore').encode('iso8859-15', 'ignore')
description = description.decode('utf-8', 'ignore').encode('iso8859-15', 'ignore')
# NOTE: timeout = 0 means sticky, so add one second to map -1 to 0 and make 0 non-sticky
if timeout < 1:
timeout += 1
self.client.sendNotification(title=title, description=description, timeout=timeout)
def addClient(self, client):
self.client = client
def removeClient(self, client):
self.client = None
class SnarlNetworkProtocolServerFactory(ServerFactory):
protocol = SnarlNetworkProtocol
def __init__(self):
self.clients = []
def addClient(self, client):
self.clients.append(client)
def removeClient(self, client):
self.clients.remove(client)
def sendNotification(self, *args, **kwargs):
pass
def stopFactory(self):
for client in self.clients:
client.stop()
class SnarlNetworkProtocolAbstraction:
clientPort = None
serverPort = None
pending = 0
def __init__(self, host):
self.clientFactory = SnarlNetworkProtocolClientFactory()
self.serverFactory = SnarlNetworkProtocolServerFactory()
if host.enable_outgoing.value:
reactor.resolve(host.address.value).addCallback(self.gotIP).addErrback(self.noIP)
if host.enable_incoming.value:
self.serverPort = reactor.listenTCP(SNP_TCP_PORT, self.serverFactory)
self.pending += 1
def gotIP(self, ip):
self.clientPort = reactor.connectTCP(ip, SNP_TCP_PORT, self.clientFactory)
self.pending += 1
def noIP(self, error):
emergencyDisable()
def sendNotification(self, *args, **kwargs):
self.clientFactory.sendNotification(*args, **kwargs)
def maybeClose(self, resOrFail, defer=None):
self.pending -= 1
if self.pending == 0:
if defer:
defer.callback(True)
def stop(self):
defer = Deferred()
if self.clientPort:
d = self.clientPort.disconnect()
if d:
d.addBoth(self.maybeClose, defer=defer)
else:
self.pending -= 1
if self.serverPort:
d = self.serverPort.stopListening()
if d:
d.addBoth(self.maybeClose, defer=defer)
else:
self.pending -= 1
if self.pending == 0:
reactor.callLater(1, defer.callback, True)
return defer
|
python
|
import unicodedata
from django import forms
from django.contrib.auth import authenticate, get_user_model, password_validation
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX, identify_hasher
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.text import capfirst
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from .models import User, Profile
UserModel = get_user_model()
class UsernameField(forms.CharField):
def to_python(self, value):
return unicodedata.normalize("NFKC", super().to_python(value))
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {"password_mismatch": _("The two password fields didn't match.")}
password1 = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput,
help_text=password_validation.password_validators_help_text_html(),
)
password2 = forms.CharField(
label=_("Password confirmation"),
widget=forms.PasswordInput,
strip=False,
help_text=_("Enter the same password as before, for verification."),
)
class Meta:
model = UserModel
fields = ("username", "email")
field_classes = {"username": UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._meta.model.USERNAME_FIELD in self.fields:
self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(
{"autofocus": True}
)
if self._meta.model.USERNAME_FIELD != "email":
self.fields["email"] = forms.EmailField(label=_("E-mail"), max_length=75)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages["password_mismatch"], code="password_mismatch"
)
return password2
def _post_clean(self):
super()._post_clean()
# Validate the password after self.instance is updated with form data
# by super().
password = self.cleaned_data.get("password2")
if password:
try:
password_validation.validate_password(password, self.instance)
except forms.ValidationError as error:
self.add_error("password2", error)
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class AdminProfileCreationForm(forms.ModelForm):
email = forms.EmailField()
username = forms.CharField(max_length=150)
parent = forms.CharField(max_length=20, label=_("Upline ID"))
class Meta:
model = Profile
exclude = ("user",)
|
python
|
from typing import Union
from requests import Session
import json
import os
from models import ChallengeResult, ChallengeError
BASE_URL = os.getenv('API_URL', 'https://soallpeach-api-soroosh.fandogh.cloud')
session = Session()
def get_session() -> Session:
session.headers.update({
'Authorization': 'TOKEN ' + os.getenv('API_SECRET_KEY', 'STRONG_TOKEN'),
'Content-Type': 'application/json'
})
return session
class ReportRequest(object):
nickname: str
challenge_name: str
run_id: str
result: Union[ChallengeResult, ChallengeError]
state: str
def __init__(self, nickname: str, challenge_name: str, run_id: str, result: Union[ChallengeResult, ChallengeError]):
self.nickname = nickname
self.challenge_name = challenge_name
self.run_id = run_id
self.result = result
self.state = 'PASSED' if isinstance(result, ChallengeResult) else 'FAILED'
def report(nickname: str, challenge_name: str, run_id: str, result: Union[ChallengeResult, ChallengeError]):
request = ReportRequest(nickname, challenge_name, run_id, result)
request_json = json.dumps(request.__dict__, default=lambda o: o.__dict__, indent=4)
response = get_session().post(f'{BASE_URL}/scores', data=request_json)
print(response)
print(response.text)
|
python
|
#!/data2/zhangshuai/anaconda3/bin
# -*- coding: utf-8 -*-
import os
import wave
from pydub import AudioSegment
import json
wav_path = "/home/zhangshuai/kaldi-master/egs/biendata/Magicdata/audio/test"
trans_path = "/home/zhangshuai/kaldi-master/egs/biendata/Magicdata/transcription/test_no_ref_noise"
wav_segments_path = "/home/zhangshuai/kaldi-master/egs/biendata/Magicdata_seg/audio/test"
trans_segments_path = "/home/zhangshuai/kaldi-master/egs/biendata/Magicdata_seg/transcription/test"
wav_files = os.listdir(wav_path)
# wav_files = [wav_path + "/" + f for f in wav_files if f.endswith('.wav')]
trans_files = os.listdir(trans_path)
trans_files = [trans_path + "/" + f for f in trans_files if f.endswith('.json')]
for file in wav_files:
    if not file.startswith('.'):
# with wave.open(file, "rb") as wav_f:
# print(f.getparams())
wav_parts_paths = wav_segments_path + '/' + file.split('.', 1)[0]
if not os.path.exists(wav_parts_paths):
os.makedirs(wav_parts_paths)
trans_parts_path = trans_segments_path + '/' + file.split('.', 1)[0]
if not os.path.exists(trans_parts_path):
os.makedirs(trans_parts_path)
# print(wav_parts_paths)
# print(file)
print(trans_path + "/" + file.rsplit('_', 1)[0] + '.json')
with open(trans_path + "/" + file.rsplit('_', 1)[0] + '.json', 'r') as trans_f:
trans = json.load(trans_f)
# print(len(trans))
for i in range(len(trans)-1):
# print(i)
# sub_trans = trans[i]
# if not lines:
# break
# trans_info = lines.split('\t', 4)
start_time = trans[i]['start_time']
print(start_time)
start_time = (int(start_time.split(':')[0])*3600 + int(start_time.split(':')[1])*60 + float(start_time.split(':')[2]))*1000
end_time = trans[i]['end_time']
print(end_time)
# with open(trans_parts_path + '/' + file.split('.', 1)[0] + '_' + str(i) + '.txt', 'w') as w:
# w.write(file.split('.', 1)[0] + '_' + str(i) + '.wav' + ' ' + trans[i]['words'])
end_time = (int(end_time.split(':')[0])*3600 + int(end_time.split(':')[1])*60 + float(end_time.split(':')[2]))*1000
# print(trans_info[0])
# print(start_time,end_time)
                wav = AudioSegment.from_file(wav_path + '/' + file)  # the inputs are .wav files, so use the generic loader
wav_parts = wav[int(start_time) : int(end_time)]
# wav_parts.export(wav_parts_paths + '/' + file.split('.', 1)[0] + '_' + str(i) + '.wav', format="wav")
wav_parts.export(wav_parts_paths + '/' + trans[i]['uttid'] + '.wav', format="wav")
#if __name__ == '__main__':
|
python
|
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapi.test.utils import log
from networkapi.test.utils import xml2dict
def assert_response_error(response, codigo, descricao=None):
""" Verifica se a resposta da networkapi foi como esperada. Quando for passado uma lista de códigos
possiveis, a descrição não poderá ser passada
"""
# trata o argumento código, quando somente 1 elemento for passado
codigos = codigo if type(codigo) == list else [codigo]
try:
networkapi_response = xml2dict(response.content)
codigo_resposta = int(networkapi_response['erro']['codigo'])
descricao_resposta = networkapi_response['erro']['descricao']
        assert codigo_resposta in codigos, u"Invalid response code: %d (description: %s). Expected: %s" % (
            codigo_resposta, descricao_resposta, repr(codigos))
assert descricao_resposta is not None
assert len(descricao_resposta) > 0
if descricao:
assert descricao_resposta == descricao
except:
        # If there is any error parsing the response, log what it was
        # to make the investigation easier
        log.error("Error parsing the response:\n%s\n",
(response or response.content))
raise
def assert_response_success(response, status_code=200, codigo=0, stdout=None, stderr=None):
""" Verifica se a resposta da networkapi foi sucesso e com os valores informados """
try:
assert response.status_code == status_code
networkapi_response = xml2dict(response.content)
codigo_resposta = int(networkapi_response['sucesso']['codigo'])
        assert codigo_resposta == codigo, u"Invalid response code: %d. Expected: %d" % (
            codigo_resposta, codigo)
if stdout:
assert networkapi_response['sucesso'][
'descricao']['stdout'] == stdout
if stderr:
assert networkapi_response['sucesso'][
'descricao']['stderr'] == stderr
except:
        # If there is any error parsing the response, log what it was
        # to make the investigation easier
        log.error("Error parsing the response:\n%s\n",
(response or response.content))
raise
|
python
|
import os
## FIXME most of the path variables should come from env vars
PWD = os.getcwd()
OUTPUT_PATH= os.path.join(PWD, "CodeComb_Outputs")
#FORMATS = ['.cpp']
DATA_PATH = os.path.join(PWD, "CodeComb_Data")
DF_FILE = "df_corpus"
DOC_EMB_PATH = os.path.join(OUTPUT_PATH, "doc_emb_" + DF_FILE)
ANN_INDEX_PATH = os.path.join(OUTPUT_PATH, "annoy_" + DF_FILE)
W2V_MODEL_PATH = os.path.join(OUTPUT_PATH, "w2v_model_" + DF_FILE)
DF_PATH = os.path.join(DATA_PATH, DF_FILE)
def ensure_dir(file_path):
print ("Checking path = {}".format(file_path))
if not os.path.exists(file_path):
os.makedirs(file_path)
def init_path():
ensure_dir(OUTPUT_PATH)
ensure_dir(DATA_PATH)
|
python
|
import scrapy
import re
from .mirkcolorselector import sampler_function
class DecjubaSpider(scrapy.Spider):
name = "decjuba_products"
start_urls = [
'https://www.decjuba.com.au/collections/women/dresses',
'https://www.decjuba.com.au/collections/women/jackets',
'https://www.decjuba.com.au/collections/women/cardigans',
'https://www.decjuba.com.au/collections/women/pants',
'https://www.decjuba.com.au/collections/women/shorts',
'https://www.decjuba.com.au/collections/women/skirts',
'https://www.decjuba.com.au/collections/women/tees',
'https://www.decjuba.com.au/collections/women/tops',
'https://www.decjuba.com.au/collections/d-luxe/pants',
'https://www.decjuba.com.au/collections/d-luxe/dl-dresses',
'https://www.decjuba.com.au/collections/d-luxe/dl-tops'
]
def parse(self, response):
for product in response.xpath('//p[@class="h6"]'):
url = "https://www.decjuba.com.au" + product.css('a::attr(href)').extract_first()
next_page = response.xpath('//span[@class="next"]/a/@href').extract_first()
if next_page is not None:
yield response.follow(next_page, self.parse)
yield scrapy.Request(url, callback=self.parse_product, meta={'start_url':response.request.url})
def parse_product(self, response):
class Item(scrapy.Item):
name = scrapy.Field()
price = scrapy.Field()
link = scrapy.Field()
images = scrapy.Field()
sizes = scrapy.Field()
style = scrapy.Field()
stock = scrapy.Field()
gender = scrapy.Field()
colour = scrapy.Field()
address = scrapy.Field()
location = scrapy.Field()
item_type = scrapy.Field()
vendor_name = scrapy.Field()
def women_size_converter(size):
return {
'XXS': 6,
'XXS/XS': 8,
'XS': 8,
'XS/S': 10,
'S': 10,
'S/M': 12,
'M': 12,
'M/L': 14,
'L': 14,
'L/XL': 16,
'XL': 16,
'XL/XXL': 18,
'XXL': 18,
'onesize': None,
'6': 6,
'8': 8,
'10': 10,
'12': 12,
'14': 14,
'16': 16,
'36': 5,
'37': 6,
'38': 7,
'39': 8,
'40': 9,
'41': 10,
}.get(size, size)
for info in response.xpath('//div[contains(@class, "product-single") and contains(@class, "grid")]'):
item = Item()
item['name'] = info.xpath('//div[@itemprop="name"]/text()').extract_first()
price = re.findall(r'(\d[^\s\\]+)', str(info.xpath('//div[@itemprop="price"]/text()').extract()))
item['price'] = float(price[0])
item['link'] = response.url
sizes = info.xpath('//ul[@class="size-container"]/li/input/@value').extract()
item['sizes'] = [women_size_converter(i) for i in sizes]
item['style'] = info.xpath('//div[@id="product-description"]/p/text()').extract()
item['images'] = ['https:' + i for i in info.xpath('//div[@class="swiper-wrapper"]/div/img').xpath('@src').extract()]
colour = info.xpath('//span[@class="colour-option"]/img').xpath('@src').extract()
item['colour'] = [sampler_function(i, 0.3) for i in colour][0]
item['gender'] = 'Women'
item['address'] = "Shop 310, Broadway Shopping Centre 1 Bay Street, Broadway, New South Wales 2007, Australia"
item['location'] = "-33.883835, 151.194704"
item['stock'] = True
item['item_type'] = re.findall(r'.+(\/.+)$', response.meta['start_url'])
item['vendor_name'] = 'Decjuba'
yield item
|
python
|
from cloudshell.devices.runners.autoload_runner import AutoloadRunner
from vyos.flows.autoload import VyOSAutoloadFlow
class VyOSAutoloadRunner(AutoloadRunner):
def __init__(self, resource_config, cli_handler, logger):
"""
:param resource_config:
:param cli_handler:
:param logger:
"""
super(VyOSAutoloadRunner, self).__init__(resource_config)
self._cli_handler = cli_handler
self._logger = logger
@property
def autoload_flow(self):
return VyOSAutoloadFlow(cli_handler=self._cli_handler,
resource_config=self.resource_config,
logger=self._logger)
def discover(self):
"""
:return: AutoLoadDetails object
"""
return self.autoload_flow.execute_flow()
|
python
|
"""
Given a string "str", return True if it contains the same number of occurrences
of the substrings "cat" and "dog".
E.g.: ('catdog') → True; ('1cat1cadodog') → True; ('catcat') → False.
"""
def cat_dog(str):
return str.count("cat") == str.count("dog")
print(cat_dog("catdog"))
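# The remaining examples from the docstring:
print(cat_dog("1cat1cadodog"))  # True
print(cat_dog("catcat"))        # False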
|
python
|
"""
This file implements a general purpose best-first planner.
--------------HOW TO INITIALIZE IT -------------
An instance of the planner is created using
planner = Planner(s)
where s is the initial state in the planning process.
The planner needs five functions/methods to work properly.
These functions can either be implemented as methods of the state 's',
or provided as optional arguments to the constructor. They are:
get_children : state -> iterable collection of states.
This function takes a state and returns all its neighbours in the state space
is_goal_state : state -> bool
This function returns true if the provided state is a goal state
extract_plan : state -> whatever result you want
This function generates some sort of plan from the goal state.
This is what the planner returns, and the planner itself doesn't care
about how the extracted plan looks
heuristic : state -> int
This function produces a heuristic value for the provided state.
Technically, it could map each state to anything that is comparable,
but integers are fast, so lets stick to that.
get_g_value : state -> int
    This function gives the distance from the initial state of the search to this state.
The functions provided as arguments take precedence over method implementations in 's'.
This means that it is possible to implement all methods in 's',
but still provide a custom heuristic function to the planner in the following way:
planner = Planner(s, heuristic=custom_heuristic_function)
---------------- HOW TO USE IT ----------------------------
The planner supports three different ways of searching the state space:
expand_one_state - picks one state from the frontier and expands it. It is more or less useless, and used only as a sub procedure
expand_n_states - this method takes an integer 'n' as argument and repeats expand_one_state 'n' times or until a successful plan is found.
This is useful if you want to search for a while, but stop if the search takes too long
If a plan is found during the execution of any of the above procedures, it is stored in the attribute 'plan' of the planner.
I.e., get it using (some_planner.plan). The attribute 'plan' is None if no plan is found
The last way of finding a plan is:
make_plan - This method starts searching and stops when a plan is found or the state space is exhausted.
It returns None if it couldn't find a plan, or the plan itself otherwise
"""
import heapq
import sys
from time import perf_counter
def default_heuristic(state):
return state.heuristic()
def default_get_g_value(state):
return state.get_g_value()
def default_is_goal_state(state):
return state.is_goal_state()
def default_extract_plan(state):
return state.extract_plan()
def default_get_children(state):
return state.get_children()
class Planner(object):
def __init__(self,initial_state,get_children = None,is_goal_state = None,extract_plan = None, heuristic = None, g_value = None, cutoff_solution_length = None, print_status = True):
#Setting the functions used to explore the state space
        #Use implementations in state unless new functions are provided
self.get_children = get_children if get_children else default_get_children
self.is_goal_state = is_goal_state if is_goal_state else default_is_goal_state
self.extract_plan = extract_plan if extract_plan else default_extract_plan
self.heuristic = heuristic if heuristic else default_heuristic
self.g_value = g_value if g_value else default_get_g_value
#Adding the initial state to the frontier
self.frontier = []
heapq.heapify(self.frontier)
firstEntry = (self.heuristic(initial_state), initial_state)
heapq.heappush(self.frontier, firstEntry)
#Initialize remaining variables
self.expanded_set = set()
self.plan = None
#Replace the cutoff with a suitably large number if there is no cutoff
self.cutoff_solution_length = cutoff_solution_length if cutoff_solution_length else 2000000000
#output planner status
self.print_status = print_status
self.start_time = perf_counter()
self.times_printed = []
def expand_one_state(self):
#TODO: Fix this: it's not very good. What if there is no solution and the state space is exhausted?
assert len(self.frontier) > 0, "state space exhausted in planner"
#Extract the state with minimum heuristic value from the frontier
result = heapq.heappop(self.frontier)
state = result[1]
#Find the plan if state is goal
if self.is_goal_state(state):
#if hasattr(state, 'game_state'):
# print("--------------------------FOUND THE GOAL-----------------",file=sys.stderr)
# print(state.game_state,file=sys.stderr)
self.plan = self.extract_plan(state)
return
#Add the state to the expanded set
self.expanded_set.add(state)
#Get the unexpanded neighbours of the state
children = self.get_children(state)
#Filter out expanded states and states that are past the cutoff for solution length
children = [s for s in children if s not in self.expanded_set and self.g_value(s) < self.cutoff_solution_length]
#Calculate their heuristic value
children = [(self.heuristic(s),s) for s in children]
#Add them to the frontier
for entry in children:
heapq.heappush(self.frontier, entry)
# will print the current search status roughly every 10 seconds
if self.print_status:
if not int('%.f' % self.used_time()) % 10 and int('%.f' % self.used_time()) not in self.times_printed:
self.times_printed.append(int('%.f' % self.used_time()))
print(self.search_status(), file=sys.stderr, flush=True)
#Expands at most n more states from the frontier.
#Returns the plan if it is found, otherwise returns None
def expand_n_states(self, n):
for i in range(n):
if self.plan:
return self.plan
self.expand_one_state()
return self.plan
#finds a plan to the problem. If there is no goal state, returns None
def make_plan(self):
while(len(self.frontier) > 0 and not self.plan):
self.expand_one_state()
return self.plan
def used_time(self):
return perf_counter()-self.start_time
def search_status(self):
return "expanded: {}, frontier: {}, generated: {}, time: {:3.2f} ".format(len(self.expanded_set), len(self.frontier), len(self.expanded_set)+len(self.frontier), self.used_time())
|
python
|
import cv2
import apriltag
# Helper function to display an image on screen
def viewImage(image, window_name='window name'):
cv2.imshow(window_name, image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Read the image and convert it to grayscale
tag = cv2.imread('/home/administrator/PycharmProjects/trial/signs/t9.png')
gray_tag = cv2.cvtColor(tag, cv2.COLOR_BGR2GRAY)
viewImage(gray_tag)
# Define the AprilTag family, then configure the detector and detect AprilTags in the image
options = apriltag.DetectorOptions(families='tag36h11')
detector = apriltag.Detector(options)
results = detector.detect(gray_tag)
print("[INFO] {} total AprilTags detected".format(len(results)))
print(results)
for r in results:
# extract the bounding box (x, y)-coordinates for the AprilTag
# and convert each of the (x, y)-coordinate pairs to integers
(ptA, ptB, ptC, ptD) = r.corners
ptB = (int(ptB[0]), int(ptB[1]))
ptC = (int(ptC[0]), int(ptC[1]))
ptD = (int(ptD[0]), int(ptD[1]))
ptA = (int(ptA[0]), int(ptA[1]))
# draw the bounding box of the AprilTag detection
cv2.line(tag, ptA, ptB, (0, 255, 0), 2)
cv2.line(tag, ptB, ptC, (0, 255, 0), 2)
cv2.line(tag, ptC, ptD, (0, 255, 0), 2)
cv2.line(tag, ptD, ptA, (0, 255, 0), 2)
# draw the center (x, y)-coordinates of the AprilTag
(cX, cY) = (int(r.center[0]), int(r.center[1]))
cv2.circle(tag, (cX, cY), 5, (0, 0, 255), -1)
# draw the tag family on the image
tagID = str(r.tag_id)
cv2.putText(tag, tagID, (ptA[0], ptA[1] - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
print("[INFO] tag ID: {}".format(tagID))
viewImage(tag)
|
python
|
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.math import BSpline, global_bspline_interpolation, rational_bspline_from_arc, Vec3
def test_from_nurbs_python_curve_to_ezdxf_bspline():
from geomdl.fitting import interpolate_curve
curve = interpolate_curve([(0, 0), (0, 10), (10, 10), (10, 0)], degree=3)
bspline = BSpline.from_nurbs_python_curve(curve)
assert bspline.degree == 3
assert len(bspline.control_points) == 4
assert len(bspline.knots()) == 8 # count + order
def test_from_ezdxf_bspline_to_nurbs_python_curve_non_rational():
bspline = global_bspline_interpolation([(0, 0), (0, 10), (10, 10), (10, 0)], degree=3)
# to NURBS-Python
curve = bspline.to_nurbs_python_curve()
assert curve.degree == 3
assert len(curve.ctrlpts) == 4
assert len(curve.knotvector) == 8 # count + order
assert curve.rational is False
# and back to ezdxf
spline = BSpline.from_nurbs_python_curve(curve)
assert spline.degree == 3
assert len(spline.control_points) == 4
assert len(spline.knots()) == 8 # count + order
def test_from_ezdxf_bspline_to_nurbs_python_curve_rational():
bspline = rational_bspline_from_arc(center=Vec3(0, 0), radius=2, start_angle=0, end_angle=90)
# to NURBS-Python
curve = bspline.to_nurbs_python_curve()
assert curve.degree == 2
assert len(curve.ctrlpts) == 3
assert len(curve.knotvector) == 6 # count + order
assert curve.rational is True
assert curve.weights == [1.0, 0.7071067811865476, 1.0]
# and back to ezdxf
spline = BSpline.from_nurbs_python_curve(curve)
assert spline.degree == 2
assert len(spline.control_points) == 3
assert len(spline.knots()) == 6 # count + order
assert spline.weights() == (1.0, 0.7071067811865476, 1.0)
if __name__ == '__main__':
pytest.main([__file__])
|
python
|
import itertools
import numpy
from matplotlib import pyplot
from typing import Dict, Sequence, Tuple
from warg import Number
__all__ = [
"plot_errors",
"masks_to_color_img",
"plot_prediction",
"bounding_box_from_mask",
]
def plot_errors(results_dict: Dict, title: str) -> None:
"""
Args:
results_dict:
title:
"""
markers = itertools.cycle(("+", "x", "o"))
pyplot.title(f"{title}")
for label, result in sorted(results_dict.items()):
pyplot.plot(result, marker=next(markers), label=label)
pyplot.ylabel("dice_coef")
pyplot.xlabel("epoch")
pyplot.legend(loc=3, bbox_to_anchor=(1, 0))
pyplot.show()
def masks_to_color_img(masks: numpy.ndarray) -> numpy.ndarray:
"""
Args:
masks:
Returns:
"""
height, width, mask_channels = masks.shape
color_channels = 3
color_image = numpy.zeros((height, width, color_channels), dtype=numpy.uint8) * 255
for y in range(height):
for x in range(width):
for mc in range(mask_channels):
color_image[y, x, mc % color_channels] = masks[y, x, mc]
return color_image.astype(numpy.uint8)
def plot_prediction(
img_array: numpy.ndarray,
labels: Sequence,
max_pred: Sequence,
pred: Sequence,
n_col: int = 3,
) -> None:
"""
Args:
img_array:
labels:
max_pred:
pred:
n_col:
"""
n_row = len(img_array) // n_col
f, plots = pyplot.subplots(
n_row, n_col, sharex="all", sharey="all", figsize=(n_col * 4, n_row * 4)
)
for i in range(len(img_array)):
plots[i // n_col, i % n_col].imshow(img_array[i])
plots[i // n_col, i % n_col].set_title(
f"truth:{labels[i]},\n max_pred:{max_pred[i]},\n pred:{pred[i]}", fontsize=8
)
def bounding_box_from_mask(
hard_mask: numpy.ndarray,
) -> Tuple[Number, Number, Number, Number]:
"""
Args:
hard_mask:
Returns:
"""
nz = numpy.nonzero(hard_mask)
return numpy.min(nz[0]), numpy.min(nz[1]), numpy.max(nz[0]), numpy.max(nz[1])
|
python
|
# -*- coding: utf-8 -*-
import numpy
import os
from os.path import join
import shutil
import time
import sys
import math
import json
import utilities
import matplotlib.pyplot as plt
def get_num_antennas(ms):
"""."""
tb.open(ms + '/ANTENNA', nomodify=True)
num_stations = tb.nrows()
tb.close()
return num_stations
def average_ms(ms_ref, ms_in, ms_out, overwrite=True):
if not overwrite and os.path.isdir(ms_out):
return
# Create output MS by making a copy of the reference MS.
if os.path.exists(ms_out):
shutil.rmtree(ms_out)
    print('Averaging MS:', ms_in)
shutil.copytree(ms_ref, ms_out)
tb.open(ms_in, nomodify=True)
num_rows = tb.nrows()
    num_times = num_rows // num_baselines  # integer number of time samples per baseline
col_data = tb.getcol('DATA')
col_uvw = tb.getcol('UVW')
col_ant1 = tb.getcol('ANTENNA1')
col_ant2 = tb.getcol('ANTENNA2')
col_time = tb.getcol('TIME')
uu = col_uvw[0, :]
uu = uu.reshape(num_times, num_baselines)
ave_uu = numpy.mean(uu, axis=0)
vv = col_uvw[1, :]
vv = vv.reshape(num_times, num_baselines)
ave_vv = numpy.mean(vv, axis=0)
ww = col_uvw[2, :]
ww = ww.reshape(num_times, num_baselines)
ave_ww = numpy.mean(ww, axis=0)
t = col_time
t = t.reshape(num_times, num_baselines)
ave_t = numpy.mean(t, axis=0)
# Assert that the MS has 1 channel and is stokes-I only.
assert col_data.shape[0] == 1
assert col_data.shape[1] == 1
assert col_data.shape[2] == num_rows
data = numpy.squeeze(col_data)
data = data.reshape(num_times, num_baselines)
ave_data = numpy.mean(data, axis=0)
tb.close()
tb.open(ms_out, nomodify=False)
col_data = tb.getcol('DATA')
tb.putcol('DATA', numpy.reshape(ave_data, col_data.shape))
col_data = tb.getcol('DATA')
tb.close()
if __name__ == "__main__":
"""Copy the ref ms and populate it with averaged input ms."""
    # config_file is assumed to be defined by the calling CASA environment/script.
    settings = utilities.byteify(json.load(open(config_file)))
sim_dir = settings['path']
ms_ref = join(sim_dir, 'n0001.ms')
num_antennas = get_num_antennas(ms_ref)
    num_baselines = num_antennas * (num_antennas - 1) // 2
for n in settings['sim']['observation']['num_times']:
if n == 1:
continue
# === No smearing ===
ms_in = join(sim_dir, 'n%04i.ms' % n)
ms_out = join(sim_dir, 'ave_n%04i.ms' % n)
average_ms(ms_ref, ms_in, ms_out, overwrite=False)
# === With analytical smearing ===
ms_in = join(sim_dir, 'n%04i_smearing.ms' % n)
ms_out = join(sim_dir, 'ave_n%04i_smearing.ms' % n)
average_ms(ms_ref, ms_in, ms_out, overwrite=False)
|
python
|
# coding=utf-8
import datetime
import json
import time
import redis
import scrapy
from pymongo import MongoClient
from scrapy.http import Request
from scrapy_redis.spiders import RedisSpider
from biliob_spider.items import TagListItem
from biliob_tracer.task import SpiderTask
from db import db
class TagAdderSpider(RedisSpider):
name = "tagAdder"
allowed_domains = ["bilibili.com"]
start_urls = []
custom_settings = {
'ITEM_PIPELINES': {
'biliob_spider.pipelines.TagAdderPipeline': 300
},
}
    def __init__(self, *args, **kwargs):
        super(TagAdderSpider, self).__init__(*args, **kwargs)
        self.db = db
def start_requests(self):
for i in self.start_urls:
yield Request(i, meta={
'dont_redirect': True,
'handle_httpstatus_list': [302]
}, callback=self.parse)
def parse(self, response):
try:
aid = str(
response.url.lstrip(
'https://www.bilibili.com/video/av').rstrip('/'))
tagName = response.xpath("//li[@class='tag']/a/text()").extract()
item = TagListItem()
item['aid'] = int(aid)
item['tag_list'] = []
if tagName != []:
ITEM_NUMBER = len(tagName)
for i in range(0, ITEM_NUMBER):
item['tag_list'].append(tagName[i])
yield item
except Exception as error:
            # Print the error log when an exception occurs
print(error)
item = TagListItem()
item['aid'] = int(aid)
item['tag_list'] = []
yield item
|
python
|
"""
Bot's behaviour
"""
INTENTS = [
{
'name': 'Date',
'tokens': ('when', 'time', 'date', 'at', '1'), # You can add any key words in the list
'scenario': None,
'answer': 'The conference is being held on May 10, registration will start at 11 am.'
},
{
'name': 'Place',
'tokens': ('where', 'place', 'location', 'address', 'station', '2', ), # You can add any key words in the list
'scenario': None,
'answer': 'The conference will be held at the Centre of the City.'
},
{
'name': 'Registration',
'tokens': ('reg', 'add', '3', ), # You can add any key words in the list
'scenario': 'registration',
'answer': None
},
{
'name': 'Greetings',
'tokens': ('thx', 'thank', '4', ), # You can add any key words in the list
'scenario': None,
'answer': 'You are welcome!'
},
]
SCENARIOS = {
'registration': {
'first_step': 'step1',
'steps': {
'step1': {
'text': 'Write your name to register. It will be shown on your badge',
'failure_text': 'Name must contain at least 2 symbols. Try one more time',
'handler': 'handle_name',
'next_step': 'step2',
},
'step2': {
'text': 'Send your e-mail, we will send all the required information to this address.',
                'failure_text': 'There is a typo in your email. Try one more time',
'handler': 'handle_email',
'next_step': 'step3',
},
'step3': {
'text': 'Thanks for your time, {name}! Your ticket is below, also, we sent the ticket to your {email}, print it',
'image': 'generate_ticket_handler',
'failure_text': None,
'handler': None,
'next_step': None,
},
}
}
}
DEFAULT_ANSWER = 'IDK how to answer. ' \
'But I know where the conference is held and I can send you all the information I know, just ask me'
DB_CONFIG = dict(
provider='postgres',
user='postgres',
password='',
host='localhost',
database='chatbot'
)
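# Minimal dispatch sketch (illustration only, not part of the bot itself): shows how a
# handler might pick an intent from INTENTS by token matching and fall back to DEFAULT_ANSWER.
def answer_for(text):
    text = text.lower()
    for intent in INTENTS:
        if any(token in text for token in intent['tokens']):
            # Intents with a scenario (e.g. 'registration') would start that scenario
            # instead of returning a fixed answer.
            return intent['answer'] or "Starting scenario: {}".format(intent['scenario'])
    return DEFAULT_ANSWER

# answer_for("When does it start?")  # -> the 'Date' intent answer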
|
python
|
from rest_framework.exceptions import APIException
class CFSSLError(APIException):
status_code = 503
default_detail = 'Could not create Docker certificate.'
default_code = 'docker_certificate_service_unavailable'
|
python
|
"""Source code for categorical dqn brain class.
Author: Yoshinari Motokawa <[email protected]>
"""
from typing import List
import torch
from omegaconf import DictConfig
from torch import nn
from .abstract_brain import AbstractBrain
from core.agents.models.customs.categorical_dqn import ApplySoftmax
class CategoricalDQNBrain(AbstractBrain):
def __init__(self, config: DictConfig, obs_shape: List[int], act_size: int):
super().__init__(config=config, obs_shape=obs_shape, act_size=act_size)
self.gamma = config.gamma
self.num_atoms = config.model.num_atoms
self.V_min = config.model.V_min
self.V_max = config.model.V_max
self.support = torch.linspace(self.V_min, self.V_max, self.num_atoms).to(
device=self.device
) # Support (range) of z
self.delta_z = (self.V_max - self.V_min) / (self.num_atoms - 1)
@torch.no_grad()
def get_action(self, state):
for state_key, state_value in state.items():
state[state_key] = state_value.unsqueeze(0).float().to(self.device)
model_output = self.network(state, ApplySoftmax.NORMAL)
model_output = torch.sum(model_output * self.support, dim=2)
_, action = torch.max(model_output, dim=1)
action = int(action.item())
return action
def learn(self, states_ind, actions_ind, rewards_ind, dones_ind, next_states_ind):
for states_key, states_value in states_ind.items():
states_ind[states_key] = states_value.float().to(self.device)
actions_ind = actions_ind.to(self.device)
rewards_ind = rewards_ind.float().to(self.device)
dones_ind = dones_ind.to(self.device)
for next_states_key, next_states_value in next_states_ind.items():
next_states_ind[next_states_key] = next_states_value.float().to(self.device)
batch_size = dones_ind.shape[0]
log_p = self.network(states_ind, ApplySoftmax.LOG)
log_p_a = log_p[range(batch_size), actions_ind.squeeze()]
with torch.no_grad():
            # Pick the action with the highest expected value
model_output = self.network(next_states_ind, ApplySoftmax.NORMAL)
best_actions = torch.sum(model_output * self.support, dim=2).argmax(dim=1)
p_next = self.target_network(next_states_ind, ApplySoftmax.NORMAL)
            # (1) For terminal states, pre-fill a uniform probability over all atoms
p_next_best = torch.zeros(0).to(self.device, dtype=torch.float32).new_full((batch_size, self.num_atoms), 1.0 / self.num_atoms)
            # For non-terminal states, overwrite with the distribution computed via Double-DQN
p_next_best = p_next[range(batch_size), best_actions]
            # Project the rewards onto the distribution support
Tz = (rewards_ind.unsqueeze(1) + self.gamma * self.support.unsqueeze(0)).clamp(self.V_min, self.V_max)
b = (Tz - self.V_min) / self.delta_z
lower = b.floor().long()
upper = b.ceil().long()
            # (3) Avoid elements of m staying 0 when b happens to be exactly an integer
lower[(lower == upper) * (0 < lower)] -= 1
            # Since lower has already been adjusted above, the adjustment below never fires for the same element
upper[(lower == upper) * (upper < self.num_atoms - 1)] += 1
m = torch.zeros(batch_size, self.num_atoms).to(self.device, dtype=torch.float32)
            # (4) To accumulate per-sample sums, compute an offset and treat m as a flattened 1-D tensor
offset = torch.linspace(0, ((batch_size - 1) * self.num_atoms), batch_size).unsqueeze(1).expand(batch_size, self.num_atoms).to(lower)
m.view(-1).index_add_(0, (lower + offset).view(-1), (p_next_best * (upper.float() - b)).view(-1))
m.view(-1).index_add_(0, (upper + offset).view(-1), (p_next_best * (b - lower.float())).view(-1))
self.optimizer.zero_grad()
loss = -torch.sum(m * log_p_a, dim=1).mean()
loss.backward()
nn.utils.clip_grad_norm_(self.network.parameters(), 10)
self.optimizer.step()
return loss.detach()
|
python
|
# -*- coding: utf-8 -*-
"""Model definitions."""
from django.db import models
from picklefield.fields import PickledObjectField
class Model(models.Model):
"""GLM model."""
blob = PickledObjectField()
def __str__(self):
return f'Hello, I am the GLM model #{self.id}'
|
python
|
import anki_vector
from anki_vector.util import Pose, degrees
def main():
args = anki_vector.util.parse_command_args()
with anki_vector.Robot(args.serial, show_3d_viewer=True, enable_nav_map_feed=True) as robot:
robot.behavior.drive_off_charger()
fixed_object = robot.world.create_custom_fixed_object(Pose(200, -50, 0, angle_z=degrees(90)), 100, 50, 100, relative_to_robot=True)
fixed_object = robot.world.create_custom_fixed_object(Pose(100, 50, 0, angle_z=degrees(90)), 100, 50, 100, relative_to_robot=True)
if fixed_object:
print("fixed custom objects created successfully")
robot.behavior.go_to_pose(Pose(300, 0, 0, angle_z=degrees(0)), relative_to_robot=True)
robot.world.delete_custom_objects()
if __name__ == "__main__":
main()
|
python
|
# proxy module
from __future__ import absolute_import
from mayavi.action.filters import *
|
python
|
class Solution:
    # Remove the n-th node from the end of a singly-linked list using two pointers.
    # ListNode is assumed to be the judge-provided linked-list node class.
    def XXX(self, head: ListNode, n: int) -> ListNode:
        slow = head
        fast = head
        # Advance fast n nodes ahead of slow.
        for i in range(n):
            fast = fast.next
        # If fast walked off the list, the head itself is the n-th node from the end.
        if fast == None:
            return slow.next
        else:
            fast = fast.next
        # Move both pointers until fast falls off; slow then sits just before the target.
        while fast:
            fast = fast.next
            slow = slow.next
        slow.next = slow.next.next
        return head
|
python
|
# -*- coding: utf-8 -*-
import click
import pytest
from click.testing import CliRunner
from gitlabctl.cli import project_get_env
from gitlabctl.cli import run_pipeline
__author__ = "Thomas Bianchi"
__copyright__ = "Thomas Bianchi"
__license__ = "mit"
def main_get_env(func_name, id):
return [id]
def main_run_pipeline(func_name, d):
click.echo(d)
get_env_by_id_expections = [
pytest.param(['--by-id', '1123'], '1123\n', id="full"),
pytest.param(None, '\n', id="no-id"),
]
run_pipeline_expections = [
pytest.param(['NOPROD=1'], "[{'key': 'NOPROD', 'value': '1'}]\n",
id="spaced single param"),
pytest.param(['NOPROD=1', 'PROVA=2'],
"[{'key': 'NOPROD', 'value': '1'}, {'key': 'PROVA', 'value': '2'}]\n",
id="spaced multiple params"),
# pytest.param(['NOPROD=1,PROVA=2'], pytest.raises(click.BadArgumentUsage),
# id="non spaced"),
]
@pytest.mark.parametrize("a,expected", get_env_by_id_expections)
def test_project_get_env(mocker, a, expected):
mocker.patch(
'gitlabctl.project.main',
main_get_env)
runner = CliRunner()
result = runner.invoke(project_get_env, a)
assert expected == result.output
@pytest.mark.parametrize("a,expected", run_pipeline_expections)
def test_run_pipeline(mocker, a, expected):
mocker.patch(
'gitlabctl.project.main',
main_run_pipeline)
runner = CliRunner()
result = runner.invoke(run_pipeline, a)
assert expected == result.output
|
python
|
from enum import Enum
class Status(Enum):
    Dziecko = 1
    Nastolatek = 2
    Dorosly = 3
def printFileName():
print("Status")
|
python
|
import logging
from typing import Optional, Sequence
from hybrid.sites import SiteInfo
import PySAM.Singleowner as Singleowner
from hybrid.log import hybrid_logger as logger
class PowerSource:
def __init__(self, name, site: SiteInfo, system_model, financial_model):
"""
Abstract class for a renewable energy power plant simulation.
"""
self.name = name
self.site = site
self.system_model = system_model
self.financial_model = financial_model
self.set_construction_financing_cost_per_kw(financial_model.FinancialParameters.construction_financing_cost \
/ financial_model.FinancialParameters.system_capacity)
@property
def system_capacity_kw(self) -> float:
raise NotImplementedError
def get_total_installed_cost_dollars(self) -> float:
return self.financial_model.SystemCosts.total_installed_cost
def set_total_installed_cost_dollars(self, total_installed_cost_dollars: float):
self.financial_model.SystemCosts.total_installed_cost = total_installed_cost_dollars
logger.info("{} set total_installed_cost to ${}".format(self.name, total_installed_cost_dollars))
def set_construction_financing_cost_per_kw(self, construction_financing_cost_per_kw):
self._construction_financing_cost_per_kw = construction_financing_cost_per_kw
def get_construction_financing_cost(self) -> float:
return self._construction_financing_cost_per_kw * self.system_capacity_kw
def simulate(self, project_life: int = 25):
"""
Run the system and financial model
"""
if not self.system_model:
return
self.system_model.execute(0)
if not self.financial_model:
return
self.financial_model.value("construction_financing_cost", self.get_construction_financing_cost())
self.financial_model.Revenue.ppa_soln_mode = 1
self.financial_model.Lifetime.system_use_lifetime_output = 1
self.financial_model.FinancialParameters.analysis_period = project_life
single_year_gen = self.financial_model.SystemOutput.gen
self.financial_model.SystemOutput.gen = list(single_year_gen) * project_life
if self.name != "Grid":
self.financial_model.SystemOutput.system_pre_curtailment_kwac = self.system_model.Outputs.gen * project_life
self.financial_model.SystemOutput.annual_energy_pre_curtailment_ac = self.system_model.Outputs.annual_energy
self.financial_model.execute(0)
logger.info("{} simulation executed".format(self.name))
def generation_profile(self) -> Sequence:
if self.system_capacity_kw:
return self.system_model.Outputs.gen
else:
return [0] * self.site.n_timesteps
def copy(self):
"""
:return: new instance
"""
raise NotImplementedError
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 13:41:37 2018
@author: craggles
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import scipy.special
import sklearn
from matplotlib import cm
from matplotlib import rc
import matplotlib
matplotlib.rc('pdf', fonttype=42)
start = -10
end = 10
resolution = 512
x=y= np.linspace(start,end,num=resolution)
xx,yy = np.meshgrid(x,y)
rr = np.sqrt(xx**2+yy**2)
tt = np.arctan2(y,x)
airy = 2*np.divide(scipy.special.jve(1,rr),rr)
#norm_airy = sklearn.preprocessing.normalize(airy)
#plt.imshow(airy)
colors = cm.viridis(airy)
fig = plt.figure()
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
#pgf_with_rc_fonts = {"pgf.texsystem": "pdflatex"}
#matplotlib.rcParams.update(pgf_with_rc_fonts)
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d([start, end])
ax.set_ylim3d([start, end])
ax.set_zlim3d([-0.08, 1])
# Plot a basic wireframe.
#ax.plot_wireframe(xx, yy, airy, rstride=10, cstride=10)
#surf = ax.plot_surface(xx, yy, airy, rcount=50, ccount=50,
# facecolors=colors, shade=False)
surf = ax.plot_surface(xx, yy, airy, cmap=cm.viridis)
#surf.set_facecolor((0,0,0,0))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('Electric field')
plt.tight_layout()
plt.savefig("airy_E_fill.pdf")
surf.remove()
#plt.show()
#%%
#plt.cla()
surf = ax.plot_surface(xx, yy, airy**2, cmap=cm.viridis)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('Intensity')
plt.savefig("airy_I_fill.pdf")
plt.show()
|
python
|
import os
from getpass import getpass
from netmiko import ConnectHandler
password = os.getenv("PYNET_PASSWORD") if os.getenv("PYNET_PASSWORD") else getpass()
net_connect = ConnectHandler(
host="cisco3.lasthop.io",
username="pyclass",
password=password,
device_type="cisco_ios",
session_log="my_session.txt",
)
print(net_connect.find_prompt())
net_connect.disconnect()
|
python
|
from pathlib import Path
from yacs.config import CfgNode as CN
import os
import time
import logging
import torch.distributed as dist
_C = CN()
_C.dataset = 'imagenet'
_C.data_dir = './data_list/'
_C.check_path = './checkpoint'
_C.arch = 'resnet50'
_C.workers = 32
_C.epochs = 400
_C.defer_epoch = 0
_C.start_epoch = 1
_C.batch_size = 256
_C.lr = 0.02
_C.momentum = 0.9
_C.weight_decay = 5e-4
_C.print_freq = 100
_C.resume = ''
_C.resume2 = ''
_C.world_size = 1
_C.rank = 0
_C.dist_url = 'tcp://localhost:10000'
_C.dist_backend = 'nccl'
_C.seed = None
_C.gpu = None
_C.evaluate = False
_C.multiprocessing_distributed = True
# options for moco v2
_C.moco_dim = 128
_C.moco_k = 8192
_C.moco_m = 0.999
_C.grad = False
_C.mlp = True
_C.aug_plus = False
_C.normalize = False
_C.queue_size_per_cls = 4
_C.smooth = 0.1
_C.ldam_m = 0.1
# options for SupCon
_C.con_type = 'SupConLoss'
_C.gamma = 128
_C.margin = 0.25
_C.con_weight = 1.0
_C.balsfx_n = 0.0
_C.effective_num_beta = 0.99
_C.temperature = 0.1
_C.log_weight = 7.0
# options for others
_C.mark = ''
_C.debug = False
_C.aug = 'randcls_sim'
_C.log_dir = 'logs'
_C.model_dir = 'ckps'
_C.warm_epochs = 10
_C.randaug_m = 10
_C.randaug_n = 2
_C.color_p = 1.0
_C.color_h = 0.0
_C.branch_type = 'balance'
_C.alpha = 0.2
_C.path = 'same'
_C.pos_size_per_cls = 8
_C.neg_size_per_cls = 4
def update_config(cfg, args):
cfg.defrost()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
log_dir = Path("saved") / (cfg.mark) / Path(cfg.log_dir)
print('=> creating {}'.format(log_dir))
log_dir.mkdir(parents=True, exist_ok=True)
log_file = '{}.txt'.format(cfg.mark)
# final_log_file = log_dir / log_file
model_dir = Path("saved") / (cfg.mark) / Path(cfg.model_dir)
print('=> creating {}'.format(model_dir))
model_dir.mkdir(parents=True, exist_ok=True)
cfg.model_dir = str(model_dir)
# cfg.freeze()
import logging
import os
import sys
class NoOp:
def __getattr__(self, *args):
def no_op(*args, **kwargs):
"""Accept every signature by doing non-operation."""
pass
return no_op
def get_logger(config, resume=False, is_rank0=True):
"""Get the program logger.
Args:
log_dir (str): The directory to save the log file.
log_name (str, optional): The log filename. If None, it will use the main
filename with ``.log`` extension. Default is None.
resume (str): If False, open the log file in writing and reading mode.
Else, open the log file in appending and reading mode; Default is "".
is_rank0 (boolean): If True, create the normal logger; If False, create the null
logger, which is useful in DDP training. Default is True.
"""
if is_rank0:
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
# # StreamHandler
# stream_handler = logging.StreamHandler(sys.stdout)
# stream_handler.setLevel(level=logging.INFO)
# logger.addHandler(stream_handler)
# FileHandler
if resume == False:
mode = "w+"
else:
mode = "a+"
log_dir = Path("saved") / (config.mark) / Path(config.log_dir)
log_name = config.mark + ".log"
file_handler = logging.FileHandler(os.path.join(log_dir, log_name), mode=mode)
file_handler.setLevel(level=logging.INFO)
logger.addHandler(file_handler)
else:
logger = NoOp()
return logger
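# Usage sketch (illustrative): `cfg` is assumed to expose `mark` and `log_dir`, matching
# update_config() above; on non-zero DDP ranks the returned NoOp swallows every call.
#   logger = get_logger(cfg, resume=False, is_rank0=(rank == 0))
#   logger.info("training started")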
|
python
|
#!/usr/local/sbin/charm-env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charms.reactive import (
Endpoint,
toggle_flag,
)
from charmhelpers.core.hookenv import log
class KubeMastersPeer(Endpoint):
"""
Implements peering for kubernetes-master units.
"""
def manage_flags(self):
"""
Set states corresponding to the data we have.
"""
toggle_flag(
self.expand_name('{endpoint_name}.connected'),
self.is_joined)
toggle_flag(
self.expand_name('{endpoint_name}.cohorts.ready'),
self.is_joined and self._peers_have_cohorts())
def _peers_have_cohorts(self):
"""
Return True if all peers have cohort keys.
"""
for unit in self.all_joined_units:
if not unit.received.get('cohort-keys'):
log('Unit {} does not yet have cohort-keys'.format(unit))
return False
log('All units have cohort-keys')
return True
def set_cohort_keys(self, cohort_keys):
"""
Send the cohort snapshot keys.
"""
for relation in self.relations:
relation.to_publish['cohort-keys'] = cohort_keys
|
python
|
import sys
import re as _re
from fclpy.lisptype import LispSymbol
class LispStream():
def __init__(self, fh):
self.fh = fh
self.tokens = []
self.buff = []
def unread_char(self, y):
self.buff.append(y)
def push_token(self, token):
self.tokens.append(token)
def has_token(self,token):
return token in self.tokens
def pop_token(self):
return self.tokens.pop()
def read_char(self):
if len(self.buff) > 0:
return self.buff.pop()
return self.fh.read(1)
def eof(self):
return False
STDIN = LispStream(sys.stdin)
class LispReader():
def __init__(self, get_macro_character, stream = STDIN):
self.stream = stream
self.get_macro_character = get_macro_character
def read_1(self):
toss = True
while(toss):
toss = False
x = self.stream.read_char()
if self.stream.eof():
return None
elif (not self.valid_char(x)):
raise Exception("reader-error")
elif self.whitespace_char(x):
toss = True
elif self.macro_character(x):
return self.get_macro_character(x)(x,self.stream)
elif self.single_escape_character(x):
y = self.stream.read_char()
if self.stream.eof():
raise Exception("reader-error")
return self.read_8(y.upper())
elif self.multiple_escape_character(x):
return self.read_9("")
else:
return self.read_8(x.upper())
def read_8(self, token):
more = True
while(more):
y = self.stream.read_char()
if self.terminating_macro_character(y):
self.stream.unread_char(y)
more = False
elif self.whitespace_char(y):
more = False
else:
token = token + y.upper()
return self.read_10(token)
def read_10(self, token):
if _re.match("[0-9].*",token):
return token
return LispSymbol(token)
def valid_char(self,c):
        return True  # placeholder: every character is treated as valid
def whitespace_char(self,c):
return c in [" ","\t","\n","\r"]
def eof(self,c):
        return False  # placeholder: a character never signals end-of-file here
def macro_character(self,c ):
return c in ["(",")","'",";"]
def terminating_macro_character(self,c):
return c in [")"]
def non_terminating_macro_character(self,c):
        return False  # no non-terminating macro characters are defined
def single_escape_character(self,c):
return c == "\\"
def multiple_escape_character(self,c):
return c == "\""
|
python
|
"""
We have discussed Knight’s tour and Rat in a Maze problems in Set 1 and Set 2 respectively. Let us discuss N Queen as another example problem that can be solved using Backtracking.
The N Queen is the problem of placing N chess queens on an N×N chessboard so
that no two queens attack each other. For example, following is a solution for
4 Queen problem.
"""
def get_n_queens(n_queens):
    """Solve the N Queens problem by backtracking; return every solution as a 0/1 board."""
    solutions, cols = [], []  # cols[r] holds the column of the queen placed in row r

    def place(row):
        if row == n_queens:
            solutions.append([[int(cols[r] == c) for c in range(n_queens)] for r in range(n_queens)])
            return
        for col in range(n_queens):
            # Safe if no previously placed queen shares this column or a diagonal.
            if all(c != col and abs(row - r) != abs(col - c) for r, c in enumerate(cols)):
                cols.append(col)
                place(row + 1)
                cols.pop()

    place(0)
    return solutions
n_queens = 4
expected = [
[
[0, 1, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 0, 1, 0],
],
[
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
],
]
result = get_n_queens(n_queens)
result1 = result == expected
result2 = result[0] == expected[1] and result[1] == expected[0]
assert result1 or result2
print("OK")
|
python
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--text",help="text for the google_speech",
action="store")
args = parser.parse_args()
print ()
from google_speech import Speech
# say "Hello World"
text = args.text
lang = "en"
speech = Speech(text, lang)
#speech.play()
# you can also apply audio effects (using SoX)
# see http://sox.sourceforge.net/sox.html#EFFECTS for full effect documentation
sox_effects = ("speed", "1")
speech.play(sox_effects)
|
python
|
import os
import webapp2
import jinja2
import json
import cgi
import re
import hmac
import hashlib
import random
from string import letters
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
USER_RE = re.compile(r'^[a-zA-Z0-9_-]{3,20}$')
PASS_RE = re.compile(r'^.{3,20}$')
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
signupForm="""
<!DOCTYPE html>
<html>
<head>
<title>Sign Up</title>
<style type="text/css">
.label {text-align: right}
.error {color: red}
</style>
</head>
<body>
<h2>Signup</h2>
<form method="post">
<table>
<tr>
<td class="label">
Username
</td>
<td>
<input type="text" name="username" value="%(username)s">
</td>
<td class="error">
%(user_error)s
</td>
</tr>
<tr>
<td class="label">
Password
</td>
<td>
<input type="password" name="password" value="%(password)s">
</td>
<td class="error">
%(pass_error)s
</td>
</tr>
<tr>
<td class="label">
Verify Password
</td>
<td>
<input type="password" name="verify" value="%(verify)s">
</td>
<td class="error">
%(verify_error)s
</td>
</tr>
<tr>
<td class="label">
Email (optional)
</td>
<td>
<input type="text" name="email" value="%(email)s">
</td>
<td class="error">
%(email_error)s
</td>
</tr>
</table>
<input type="submit">
</form>
</body>
</html>
"""
loginForm="""
<!DOCTYPE html>
<html>
<head>
<title>Login</title>
<style type="text/css">
.label {text-align: right}
.error {color: red}
</style>
</head>
<body>
<h2>Login</h2>
<form method="post">
<table>
<tr>
<td class="label">
Username
</td>
<td>
<input type="text" name="username" value="%(username)s">
</td>
<td class="error">
%(user_error)s
</td>
</tr>
<tr>
<td class="label">
Password
</td>
<td>
<input type="password" name="password" value="%(password)s">
</td>
<td class="error">
%(pass_error)s
</td>
</tr>
</table>
<input type="submit">
</form>
</body>
</html>
"""
def valid_username(username):
return USER_RE.match(username)
def valid_password(password):
return PASS_RE.match(password)
def valid_email(email):
return EMAIL_RE.match(email)
secret='iloveyou'
def hash_cookie(cookie):
return '%s|%s' %(cookie,hmac.new(secret,cookie).hexdigest())
def valid_cookie(hashcookie):
cookie = hashcookie.split('|')[0]
if hashcookie == hash_cookie(cookie):
return cookie
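# hash_cookie('alice') yields 'alice|<HMAC hex digest>'; valid_cookie() returns the bare
# value only when the digest matches, so tampered cookies come back as None.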
def make_salt():
salt_list = [ random.choice(letters) for x in xrange(5) ]
return ''.join(salt_list)
def hash_password(password,salt=None):
if not salt:
salt=make_salt()
h = hashlib.sha256(password+salt).hexdigest()
return '%s|%s' %(h,salt)
def valid_hashpassword(hashpass,password):
salt=hashpass.split('|')[1]
if hash_password(password,salt=salt)==hashpass:
return True
class User(db.Model):
name = db.StringProperty(required=True)
pw_hash = db.StringProperty(required=True)
email = db.StringProperty()
class Signup(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
self.response.write(writeForm(signupForm))
def post(self):
username=self.request.get("username")
password=self.request.get("password")
verify=self.request.get("verify")
email=self.request.get("email")
if not valid_username(username):
user_error="It's not a valid username!"
else:
user_error=''
if not valid_password(password):
pass_error="It's not a valid password!"
else:
pass_error=''
if (password != verify):
verify_error="Password didn't match!"
else:
verify_error=''
if (email):
if not valid_email(email):
email_error="It's not a valid email!"
else:
email_error=''
else:
email_error=''
if (user_error or pass_error or verify_error or email_error):
self.response.headers['Content-Type'] = 'text/html'
self.response.write(writeForm(signupForm,username,password,verify,email,user_error,pass_error,verify_error,\
email_error))
elif User.all().filter('name =',username).get():
user_error = 'This user already exists!'
self.response.write(writeForm(signupForm,username,password,verify,email,user_error,pass_error,verify_error,\
email_error))
else:
h_username=hash_cookie(username)
pw_hash = hash_password(password)
u = User(name=username,pw_hash=pw_hash,email=email)
u.put()
self.response.headers.add_header('Set-Cookie','username=%s;Path=/' %str(h_username))
self.redirect("/thanks") # redirect
class Login(webapp2.RequestHandler):
def get(self):
self.response.write(writeForm(loginForm))
def post(self):
username=self.request.get('username')
password=self.request.get('password')
u=User.all().filter('name =',username).get()
if u and valid_hashpassword(u.pw_hash,password):
h_username=hash_cookie(username)
self.response.headers.add_header('Set-Cookie','username=%s;Path=/' %str(h_username))
self.redirect("/thanks") # redirect
else:
self.response.write(writeForm(loginForm))
class Logout(webapp2.RequestHandler):
def get(self):
self.response.headers.add_header('Set-Cookie','username=;Path=/')
self.redirect("/signup")
class ThanksHandler(webapp2.RequestHandler):
def get(self):
username=self.request.cookies.get('username')
username=valid_cookie(username)
if not username:
self.redirect('/login')
else:
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('Welcome, %s' %username)
def writeForm(form,username='',password='',verify='',email='',\
user_error='',pass_error='',verify_error='',email_error=''):
return form %{'username':username,'password':password,\
'verify':verify,'email':email,'user_error':user_error,\
'verify_error':verify_error,'email_error':email_error,'pass_error':pass_error}
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
class BaseHandler(webapp2.RequestHandler):
def render(self, template, **kw):
self.response.out.write(render_str(template, **kw))
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
class Blogdb(db.Model): # database
subject=db.StringProperty(required=True)
content=db.TextProperty(required=True)
created=db.DateTimeProperty(auto_now_add=True)
class MainPage(BaseHandler):
def get(self):
posts=db.GqlQuery("select * from Blogdb order by created desc")
self.render('mainpage.html',posts=posts)
class MainPageJason(BaseHandler):
def get(self):
main_dict = {}
main_list = []
posts=db.GqlQuery("select * from Blogdb order by created desc")
for post in posts:
main_dict['subject'] = post.subject
main_dict['content'] = post.content
main_dict['created'] = post.created.strftime('%c')
main_list.append(main_dict)
mainJson = json.dumps(main_list)
self.response.headers['Content-type']='application/json; charset=UTF-8'
self.write(mainJson)
class PostPage(BaseHandler):
def get(self,post_id):
# key = db.Key.from_path('Blogdb',int(post_id))
# post = db.get(key)
post = Blogdb.get_by_id(int(post_id))
if not post:
self.error(404)
else:
self.render('permalink.html',post = post)
class postJson(BaseHandler):
def get(self,post_id):
post_dict = {}
post_id = post_id.split('.')[0]
post = Blogdb.get_by_id(int(post_id))
if not post:
self.error(404)
else:
post_dict['subject'] = post.subject
post_dict['content'] = post.content
post_dict['created'] = post.created.strftime('%c')
postjson= json.dumps(post_dict)
self.response.headers['Content-type']='application/json; charset=UTF-8'
self.write(postjson)
class NewpostHandler(BaseHandler):
def get(self):
# self.render('newentry-form.html',subject='',error_subject='',\
# content='',error_content='')
self.render("newentry-form.html")
def post(self):
have_error= False
subject=self.request.get('subject')
content=self.request.get('content').replace('\n','<br>') # in order for the content to be
params=dict(subject=subject,content=content) # displayed properly in the browser
if not subject:
have_error=True
params['error_subject']='You must have a subject!'
if not content:
have_error=True
params['error_content']='You need enter the content!'
if have_error:
self.render('newentry-form.html',**params)
else:
blog=Blogdb(subject=subject,content=content)
blog.put()
# print blog.key().id()
self.redirect('/%s' % str(blog.key().id()))
app = webapp2.WSGIApplication([('/newpost', NewpostHandler),\
('/([0-9]+)',PostPage),
('/.json',MainPageJason),
('/([0-9]+\.json)',postJson),
('/signup', Signup),
('/thanks',ThanksHandler),
('/login',Login),
('/logout',Logout),
('/',MainPage),],debug=True)
|
python
|
import logging
import os
import shutil
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.cur = val
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1, 5)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0/batch_size))
return res
def count_parameters_in_MB(model):
return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if "aux" not in name)/1e6
def save_checkpoint(state, is_best, save):
filename = os.path.join(save, 'checkpoint.pth.tar')
torch.save(state, filename)
if is_best:
best_filename = os.path.join(save, 'model_best.pth.tar')
shutil.copyfile(filename, best_filename)
def save(model, model_path):
torch.save(model.state_dict(), model_path)
def load_net_config(path):
with open(path, 'r') as f:
net_config = ''
while True:
line = f.readline().strip()
if 'net_type' in line:
net_type = line.split(': ')[-1]
break
else:
net_config += line
return net_config, net_type
def load_model(model, model_path):
logging.info('Start loading the model from ' + model_path)
if 'http' in model_path:
model_addr = model_path
model_path = model_path.split('/')[-1]
if os.path.isfile(model_path):
os.system('rm ' + model_path)
os.system('wget -q ' + model_addr)
model.load_state_dict(torch.load(model_path), strict=False)
logging.info('Loading the model finished!')
def create_exp_dir(path):
if not os.path.exists(path):
os.mkdir(path)
print('Experiment dir : {}'.format(path))
def cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.):
"""
Label smoothing implementation.
This function is taken from https://github.com/MIT-HAN-LAB/ProxylessNAS/blob/master/proxyless_nas/utils.py
"""
logsoftmax = nn.LogSoftmax().cuda()
n_classes = pred.size(1)
# convert to one-hot
target = torch.unsqueeze(target, 1)
soft_target = torch.zeros_like(pred)
soft_target.scatter_(1, target, 1)
# label smoothing
soft_target = soft_target * (1 - label_smoothing) + label_smoothing / n_classes
return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))
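# Usage sketch (shapes are illustrative): smooth the one-hot targets of a 10-class batch.
#   pred = torch.randn(8, 10).cuda()
#   target = torch.randint(0, 10, (8,)).cuda()
#   loss = cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.1)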
def parse_net_config(net_config):
str_configs = net_config.split('|')
return [eval(str_config) for str_config in str_configs]
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def set_logging(save_path, log_name='log.txt'):
log_format = '%(asctime)s %(message)s'
date_format = '%m/%d %H:%M:%S'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt=date_format)
fh = logging.FileHandler(os.path.join(save_path, log_name))
fh.setFormatter(logging.Formatter(log_format, date_format))
logging.getLogger().addHandler(fh)
def create_save_dir(save_path, job_name):
if job_name != '':
job_name = time.strftime("%Y%m%d-%H%M%S-") + job_name
save_path = os.path.join(save_path, job_name)
create_exp_dir(save_path)
os.system('cp -r ./* '+save_path)
save_path = os.path.join(save_path, 'output')
create_exp_dir(save_path)
else:
save_path = os.path.join(save_path, 'output')
create_exp_dir(save_path)
return save_path, job_name
def latency_measure(module, input_size, batch_size, meas_times, mode='gpu'):
assert mode in ['gpu', 'cpu']
latency = []
module.eval()
input_size = (batch_size,) + tuple(input_size)
input_data = torch.randn(input_size)
if mode=='gpu':
input_data = input_data.cuda()
module.cuda()
for i in range(meas_times):
with torch.no_grad():
start = time.time()
_ = module(input_data)
torch.cuda.synchronize()
if i >= 100:
latency.append(time.time() - start)
print(np.mean(latency) * 1e3, 'ms')
return np.mean(latency) * 1e3
def latency_measure_fw(module, input_data, meas_times):
latency = []
module.eval()
for i in range(meas_times):
with torch.no_grad():
start = time.time()
output_data = module(input_data)
torch.cuda.synchronize()
if i >= 100:
latency.append(time.time() - start)
print(np.mean(latency) * 1e3, 'ms')
return np.mean(latency) * 1e3, output_data
def record_topk(k, rec_list, data, comp_attr, check_attr):
def get_insert_idx(orig_list, data, comp_attr):
start = 0
end = len(orig_list)
while start < end:
mid = (start + end) // 2
if data[comp_attr] < orig_list[mid][comp_attr]:
start = mid + 1
else:
end = mid
return start
if_insert = False
insert_idx = get_insert_idx(rec_list, data, comp_attr)
if insert_idx < k:
rec_list.insert(insert_idx, data)
if_insert = True
while len(rec_list) > k:
rec_list.pop()
return if_insert
def one_hot_tensor(y_batch_tensor, num_classes, device):
y_tensor = torch.cuda.FloatTensor(y_batch_tensor.size(0),
num_classes).fill_(0)
y_tensor[np.arange(len(y_batch_tensor)), y_batch_tensor] = 1.0
return y_tensor
def label_smoothing(y_batch_tensor, num_classes, delta):
y_batch_smooth = (1 - delta - delta / (num_classes - 1)) * \
y_batch_tensor + delta / (num_classes - 1)
return y_batch_smooth
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
class softCrossEntropy(nn.Module):
def __init__(self, reduce=True):
super(softCrossEntropy, self).__init__()
self.reduce = reduce
return
def forward(self, inputs, targets):
"""
:param inputs: predictions
:param targets: target labels in vector form
:return: loss
"""
log_likelihood = -F.log_softmax(inputs, dim=1)
sample_num, class_num = targets.shape
if self.reduce:
loss = torch.sum(torch.mul(log_likelihood, targets)) / sample_num
else:
loss = torch.sum(torch.mul(log_likelihood, targets), 1)
return loss
class CWLoss(nn.Module):
def __init__(self, num_classes, margin=50, reduce=True):
super(CWLoss, self).__init__()
self.num_classes = num_classes
self.margin = margin
self.reduce = reduce
return
def forward(self, logits, targets):
"""
:param inputs: predictions
:param targets: target labels
:return: loss
"""
onehot_targets = one_hot_tensor(targets, self.num_classes,
targets.device)
self_loss = torch.sum(onehot_targets * logits, dim=1)
other_loss = torch.max(
(1 - onehot_targets) * logits - onehot_targets * 1000, dim=1)[0]
loss = -torch.sum(torch.clamp(self_loss - other_loss + self.margin, 0))
if self.reduce:
sample_num = onehot_targets.shape[0]
loss = loss / sample_num
return loss
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: Wenbin Li ([email protected])
Date: April 9, 2019
Version: V0
Citation:
@inproceedings{li2019DN4,
title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao Yang and Luo, Jiebo},
booktitle={CVPR},
year={2019}
}
"""
from __future__ import print_function
import argparse
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import grad
import time
from torch import autograd
from PIL import ImageFile
import scipy as sp
import scipy.stats
import pdb
# ============================ Data & Networks =====================================
from dataset.datasets_csv import Imagefolder_csv
import models.network as DN4Net
# ==================================================================================
ImageFile.LOAD_TRUNCATED_IMAGES = True
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'
# Load the pre-trained model
model_trained = './results/DN4_miniImageNet_Conv64F_5Way_5Shot_K3/model_best.pth.tar'
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', default='/Datasets/miniImageNet--ravi', help='/miniImageNet')
parser.add_argument('--data_name', default='miniImageNet', help='miniImageNet|StanfordDog|StanfordCar|CubBird')
parser.add_argument('--mode', default='test', help='train|val|test')
parser.add_argument('--outf', default='./results/DN4')
parser.add_argument('--resume', default=model_trained, type=str, help='path to the lastest checkpoint (default: none)')
parser.add_argument('--basemodel', default='Conv64F', help='Conv64F|ResNet256F')
parser.add_argument('--workers', type=int, default=8)
# Few-shot parameters #
parser.add_argument('--imageSize', type=int, default=84)
parser.add_argument('--episodeSize', type=int, default=1, help='the mini-batch size of training')
parser.add_argument('--testepisodeSize', type=int, default=1, help='one episode is taken as a mini-batch')
parser.add_argument('--epochs', type=int, default=30, help='the total number of training epoch')
parser.add_argument('--episode_train_num', type=int, default=10000, help='the total number of training episodes')
parser.add_argument('--episode_val_num', type=int, default=1000, help='the total number of evaluation episodes')
parser.add_argument('--episode_test_num', type=int, default=600, help='the total number of testing episodes')
parser.add_argument('--way_num', type=int, default=5, help='the number of way/class')
parser.add_argument('--shot_num', type=int, default=5, help='the number of shot')
parser.add_argument('--query_num', type=int, default=15, help='the number of queries')
parser.add_argument('--neighbor_k', type=int, default=3, help='the number of k-nearest neighbors')
parser.add_argument('--lr', type=float, default=0.005, help='learning rate, default=0.005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', default=True, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='the number of gpus')
parser.add_argument('--nc', type=int, default=3, help='input image channels')
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--print_freq', '-p', default=100, type=int, metavar='N', help='print frequency (default: 100)')
opt = parser.parse_args()
opt.cuda = True
cudnn.benchmark = True
# ======================================= Define functions =============================================
def validate(val_loader, model, criterion, epoch_index, F_txt):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
accuracies = []
end = time.time()
for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(val_loader):
# Convert query and support images
query_images = torch.cat(query_images, 0)
input_var1 = query_images.cuda()
input_var2 = []
for i in range(len(support_images)):
temp_support = support_images[i]
temp_support = torch.cat(temp_support, 0)
temp_support = temp_support.cuda()
input_var2.append(temp_support)
# Deal with the target
target = torch.cat(query_targets, 0)
target = target.cuda()
# Calculate the output
output = model(input_var1, input_var2)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, _ = accuracy(output, target, topk=(1, 3))
losses.update(loss.item(), query_images.size(0))
top1.update(prec1[0], query_images.size(0))
accuracies.append(prec1)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
#============== print the intermediate results ==============#
if episode_index % opt.print_freq == 0 and episode_index != 0:
print('Test-({0}): [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))
print('Test-({0}): [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1), file=F_txt)
print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1))
print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1), file=F_txt)
return top1.avg, accuracies
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def mean_confidence_interval(data, confidence=0.95):
a = [1.0*np.array(data[i].cpu()) for i in range(len(data))]
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * sp.stats.t._ppf((1+confidence)/2., n-1)
return m,h
# ======================================== Settings of path ============================================
# saving path
opt.outf = opt.outf+'_'+opt.data_name+'_'+str(opt.basemodel)+'_'+str(opt.way_num)+'Way_'+str(opt.shot_num)+'Shot'+'_K'+str(opt.neighbor_k)
if not os.path.exists(opt.outf):
os.makedirs(opt.outf)
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# save the opt and results to a txt file
txt_save_path = os.path.join(opt.outf, 'Test_resutls.txt')
F_txt = open(txt_save_path, 'a+')
print(opt)
print(opt, file=F_txt)
# ========================================== Model Config ===============================================
ngpu = int(opt.ngpu)
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0
model = DN4Net.define_DN4Net(which_model=opt.basemodel, num_classes=opt.way_num, neighbor_k=opt.neighbor_k, norm='batch',
init_type='normal', use_gpu=opt.cuda)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
epoch_index = checkpoint['epoch_index']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']))
print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']), file=F_txt)
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)
if opt.ngpu > 1:
model = nn.DataParallel(model, range(opt.ngpu))
# print the architecture of the network
print(model)
print(model, file=F_txt)
# ============================================ Testing phase ========================================
print('\n............Start testing............')
start_time = time.time()
repeat_num = 5 # repeat running the testing code several times
total_accuracy = 0.0
total_h = np.zeros(repeat_num)
total_accuracy_vector = []
for r in range(repeat_num):
print('===================================== Round %d =====================================' %r)
print('===================================== Round %d =====================================' %r, file=F_txt)
# ======================================= Folder of Datasets =======================================
# image transform & normalization
ImgTransform = transforms.Compose([
transforms.Resize((opt.imageSize, opt.imageSize)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
testset = Imagefolder_csv(
data_dir=opt.dataset_dir, mode=opt.mode, image_size=opt.imageSize, transform=ImgTransform,
episode_num=opt.episode_test_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
)
print('Testset: %d-------------%d' %(len(testset), r), file=F_txt)
# ========================================== Load Datasets =========================================
test_loader = torch.utils.data.DataLoader(
testset, batch_size=opt.testepisodeSize, shuffle=True,
num_workers=int(opt.workers), drop_last=True, pin_memory=True
)
# =========================================== Evaluation ==========================================
prec1, accuracies = validate(test_loader, model, criterion, epoch_index, F_txt)
test_accuracy, h = mean_confidence_interval(accuracies)
print("Test accuracy", test_accuracy, "h", h[0])
print("Test accuracy", test_accuracy, "h", h[0], file=F_txt)
total_accuracy += test_accuracy
total_accuracy_vector.extend(accuracies)
total_h[r] = h
aver_accuracy, _ = mean_confidence_interval(total_accuracy_vector)
print("Aver_accuracy:", aver_accuracy, "Aver_h", total_h.mean())
print("Aver_accuracy:", aver_accuracy, "Aver_h", total_h.mean(), file=F_txt)
F_txt.close()
# ============================================== Testing End ==========================================
|
python
|
# Copyright 2016 Anselm Binninger, Thomas Maier, Ralph Schaumann
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import logging
from gossip.control import convert
from gossip.util.message import MessageGossipPeerResponse, MessageGossipPeerRequest, MessageGossipPeerInit, \
MessageGossipPeerUpdate, MessageGossipAnnounce
from gossip.util.packing import pack_gossip_peer_response, pack_gossip_peer_request, pack_gossip_peer_init, \
pack_gossip_peer_update, pack_gossip_announce, PEER_UPDATE_TYPE_PEER_LOST, PEER_UPDATE_TYPE_PEER_FOUND
from gossip.util.message_code import MESSAGE_CODE_ANNOUNCE, MESSAGE_CODE_PEER_REQUEST, MESSAGE_CODE_PEER_RESPONSE, \
MESSAGE_CODE_PEER_UPDATE, MESSAGE_CODE_PEER_INIT
from gossip.util.queue_item_types import QUEUE_ITEM_TYPE_SEND_MESSAGE, QUEUE_ITEM_TYPE_CONNECTION_LOST, \
QUEUE_ITEM_TYPE_RECEIVED_MESSAGE, QUEUE_ITEM_TYPE_ESTABLISH_CONNECTION, QUEUE_ITEM_TYPE_NEW_CONNECTION
__author__ = 'Anselm Binninger, Thomas Maier, Ralph Schaumann'
class P2PController(multiprocessing.Process):
def __init__(self, from_p2p_queue, to_p2p_queue, to_api_queue, p2p_connection_pool, p2p_server_address,
announce_message_cache, update_message_cache, api_registration_handler, max_ttl,
bootstrapper_address=None):
""" This controller is responsible for all incoming messages from the P2P layer. If a P2P client sends any
message, this controller handles it in various ways.
:param from_p2p_queue: Used by the P2P layer for incoming messages and commands
:param to_p2p_queue: Messages and commands for the P2P layer are sent through this queue
:param to_api_queue: Messages and commands for the API layer are sent through this queue
:param p2p_connection_pool: Pool which contains all P2P connections/clients/sockets
:param p2p_server_address: The P2P server address for this gossip instance
:param announce_message_cache: Message cache which contains announce messages.
:param update_message_cache: Message cache for peer update messages
:param api_registration_handler: Used for registrations (via NOTIFY message) from API clients
:param max_ttl: Max. amount of hops until messages will be dropped
        :param bootstrapper_address: (optional) dict to specify the bootstrapper {'host': <IPv4>, 'port': <int(port)>}
"""
multiprocessing.Process.__init__(self)
self.from_p2p_queue = from_p2p_queue
self.to_p2p_queue = to_p2p_queue
self.to_api_queue = to_api_queue
self.p2p_connection_pool = p2p_connection_pool
self.p2p_server_address = p2p_server_address
self.announce_message_cache = announce_message_cache
self.update_message_cache = update_message_cache
self.api_registration_handler = api_registration_handler
self.max_ttl = max_ttl
self.bootstrapper_address = bootstrapper_address
    def run(self):
        """ Typical run method which is used to handle P2P messages and commands. It reacts to incoming messages by
        changing the internal state of Gossip, by sending new messages or by establishing new connections. """
logging.info('%s started - PID: %s' % (type(self).__name__, self.pid))
# Bootstrapping part
if self.bootstrapper_address:
bootstrapper_identifier = '%s:%d' % (self.bootstrapper_address['host'], self.bootstrapper_address['port'])
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_ESTABLISH_CONNECTION, 'identifier': bootstrapper_identifier})
self.send_peer_request(bootstrapper_identifier)
# Usual controller part
while True:
queue_item = self.from_p2p_queue.get()
queue_item_type = queue_item['type']
message = queue_item['message']
senders_identifier = queue_item['identifier']
if queue_item_type == QUEUE_ITEM_TYPE_RECEIVED_MESSAGE:
msg_code = message.get_values()['code']
if msg_code == MESSAGE_CODE_ANNOUNCE:
logging.debug('P2PController | Handle received announce (%d): %s' % (MESSAGE_CODE_ANNOUNCE,
message))
# Spread message via API layer (only registered clients) if it's unknown until now
msg_id = self.announce_message_cache.add_message(message)
if msg_id:
logging.info('P2PController | Spread message (id: %d) through API layer' % msg_id)
# Change ttl and create new announce message
ttl = message.get_values()['TTL']
if ttl > 1 or ttl == 0:
ttl = ttl-1 if ttl > 1 else 0
packed_announce_msg = pack_gossip_announce(ttl, message.get_values()['type'],
message.get_values()['message'])['data']
announce_msg = MessageGossipAnnounce(packed_announce_msg)
# Communication with API clients works with notification messages only. Therefore we have to
# convert the announce message.
notification_msg = convert.from_announce_to_notification(msg_id, announce_msg)
for receiver in self.api_registration_handler.get_registrations(message.data_type):
if receiver != senders_identifier:
self.to_api_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE, 'identifier': receiver,
'message': notification_msg})
else:
logging.info('P2PController | Discard message (already known).')
elif msg_code == MESSAGE_CODE_PEER_REQUEST:
# Someone wants to know our known identifiers
logging.debug('P2PController | Handle received peer request (%d): %s' % (MESSAGE_CODE_PEER_REQUEST,
message))
# The peer request message contains the server address of the other peer
peer_server_identifier = message.get_values()['p2p_server_address']
self.p2p_connection_pool.update_connection(senders_identifier, peer_server_identifier)
# Build identifier list BUT exclude the identifier of the requesting peer!
own_p2p_server_identifier = '%s:%d' % (self.p2p_server_address['host'],
self.p2p_server_address['port'])
known_server_identifiers = self.p2p_connection_pool.get_server_identifiers(
identifier_to_exclude=[peer_server_identifier, own_p2p_server_identifier])
# Send the assembled identifier list
packed_data = pack_gossip_peer_response(known_server_identifiers)['data']
peer_response_msg = MessageGossipPeerResponse(packed_data)
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE, 'identifier': senders_identifier,
'message': peer_response_msg})
logging.debug('P2PController | Answering with peer response (%d): %s' % (MESSAGE_CODE_PEER_RESPONSE,
peer_response_msg))
# We've got the server identifier with the peer request, so spread it to anyone we know
senders_server_identifier = self.p2p_connection_pool.get_server_identifier(senders_identifier)
self.send_peer_update(senders_identifier, senders_server_identifier, self.max_ttl)
elif msg_code == MESSAGE_CODE_PEER_INIT:
                    # Someone wants to inform us about their server identifier
logging.debug('P2PController | Handle received peer init (%d): %s' % (MESSAGE_CODE_PEER_INIT,
message))
# The peer request message contains the server address of the other peer
peer_server_identifier = message.get_values()['p2p_server_address']
self.p2p_connection_pool.update_connection(senders_identifier, peer_server_identifier)
# We've got the server identifier with the peer init, so spread it to anyone we know
senders_server_identifier = self.p2p_connection_pool.get_server_identifier(senders_identifier)
self.send_peer_update(senders_identifier, senders_server_identifier, self.max_ttl)
elif msg_code == MESSAGE_CODE_PEER_RESPONSE:
# We received the known identifiers of someone
logging.debug('P2PController | Handle received peer response (%d): %s'
% (MESSAGE_CODE_PEER_RESPONSE, message))
# Use the peer response only if there is space for new connections in the pool
if self.p2p_connection_pool.get_capacity() > 0:
received_server_identifiers = message.get_values()['data']
new_identifiers = self.p2p_connection_pool.filter_new_server_identifiers(
received_server_identifiers)
# If the peer response provides new identifiers, we establish a new connection with them
if len(new_identifiers) > 0:
for new_identifier in new_identifiers:
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_ESTABLISH_CONNECTION,
'identifier': new_identifier})
# Send initial message
logging.debug('P2PController | Sending peer init (%d): %s' % (MESSAGE_CODE_PEER_INIT,
message))
own_p2p_server_identifier = '%s:%d' % (self.p2p_server_address['host'],
self.p2p_server_address['port'])
packed_data = pack_gossip_peer_init(own_p2p_server_identifier)['data']
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE,
'identifier': new_identifier,
'message': MessageGossipPeerInit(packed_data)})
# Stop if the pool is full
if self.p2p_connection_pool.get_capacity() <= 0:
break
else:
logging.debug('P2PController | Discarding message (%d) because pool is full!' % msg_code)
elif msg_code == MESSAGE_CODE_PEER_UPDATE:
# We received a peer update of someone
logging.debug('P2PController | Handle received peer update (%d): %s' % (MESSAGE_CODE_PEER_UPDATE,
message))
new_server_identifier = message.get_values()['address']
update_type = message.get_values()['update_type']
ttl = message.get_values()['ttl']
if ttl < int(self.max_ttl/2):
if update_type == PEER_UPDATE_TYPE_PEER_FOUND:
# Use the peer update only if there is space for a new connection in the pool
if self.p2p_connection_pool.get_capacity() > 0:
new_identifiers = self.p2p_connection_pool.filter_new_server_identifiers(
[new_server_identifier])
# If the peer update provides a new identifier, we establish a new connection with it
for new_identifier in new_identifiers:
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_ESTABLISH_CONNECTION,
'identifier': new_identifier})
# Send initial message
logging.debug('P2PController | Sending peer init (%d): %s' % (MESSAGE_CODE_PEER_INIT,
message))
own_p2p_server_identifier = '%s:%d' % (self.p2p_server_address['host'],
self.p2p_server_address['port'])
packed_data = pack_gossip_peer_init(own_p2p_server_identifier)['data']
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE,
'identifier': new_identifier,
'message': MessageGossipPeerInit(packed_data)})
else:
logging.debug('P2PController | Discarding message (%d) because pool is full' % msg_code)
elif update_type == PEER_UPDATE_TYPE_PEER_LOST:
# Currently a peer update of type PEER_UPDATE_TYPE_PEER_LOST does not need to be handled
pass
# If we don't know the peer update already, spread it
if ttl > 1:
ttl -= 1
self.send_peer_update(senders_identifier, new_server_identifier, ttl)
elif ttl == 0: # A ttl of 0 means that the message is unstoppable!
self.send_peer_update(senders_identifier, new_server_identifier, ttl)
else:
logging.debug('P2PController | Discarding message (%d)' % msg_code)
elif queue_item_type == QUEUE_ITEM_TYPE_CONNECTION_LOST:
# A connection has been disconnected from this instance
logging.debug('P2PController | One connection lost, try to get a new one %s' % senders_identifier)
random_identifier = self.p2p_connection_pool.get_random_identifier(senders_identifier)
if random_identifier:
self.send_peer_request(random_identifier)
elif queue_item_type == QUEUE_ITEM_TYPE_NEW_CONNECTION:
                # Our instance knows about a new connection
senders_server_identifier = self.p2p_connection_pool.get_server_identifier(senders_identifier)
# We can inform everyone only if we know the server identifier of the sender
if senders_server_identifier:
self.send_peer_update(senders_identifier, senders_server_identifier, self.max_ttl)
else:
logging.debug('P2PController | Don\'t know the server identifier of the new connection, wait for'
' peer server address of %s' % senders_identifier)
self.exchange_messages(senders_identifier)
def send_peer_update(self, senders_identifier, senders_server_identifier, ttl):
""" Sends peer updates to several peers.
:param senders_identifier: Identifier of the sender we received this update from
:param senders_server_identifier: Server identifier of the changed peer
:param ttl: ttl to set in the new update messages
"""
packed_data = pack_gossip_peer_update(senders_server_identifier, ttl, PEER_UPDATE_TYPE_PEER_FOUND)['data']
peer_update_msg = MessageGossipPeerUpdate(packed_data)
msg_id = self.update_message_cache.add_message(peer_update_msg, valid=True)
if msg_id and senders_server_identifier != '%s:%d' % (self.p2p_server_address['host'],
self.p2p_server_address['port']):
logging.debug('P2PController | Spread information about new connection %s' % senders_identifier)
identifiers = self.p2p_connection_pool.get_identifiers()
for identifier in identifiers:
if identifier not in [senders_identifier, senders_server_identifier]:
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE, 'identifier': identifier,
'message': peer_update_msg})
def send_peer_request(self, peer_request_identifier):
""" Sends a peer request
:param peer_request_identifier: The identifier dict of the receiving peer
"""
own_p2p_server_identifier = '%s:%d' % (self.p2p_server_address['host'], self.p2p_server_address['port'])
packed_msg = pack_gossip_peer_request(own_p2p_server_identifier)
peer_request_msg = MessageGossipPeerRequest(packed_msg['data'])
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE, 'identifier': peer_request_identifier,
'message': peer_request_msg})
    def exchange_messages(self, peer_identifier):
        """ Send all cached announce messages to a newly connected peer.
:param peer_identifier: Receiving peer
"""
logging.debug('P2PController | Exchanging messages with (%s)' % peer_identifier)
for message in self.announce_message_cache.iterator():
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE, 'identifier': peer_identifier,
'message': message["message"]})
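# A minimal sketch (not part of the original module) of the queue item dicts this
# controller consumes and produces, as implied by the handlers above:
#
#   received = {'type': QUEUE_ITEM_TYPE_RECEIVED_MESSAGE,
#               'identifier': '10.0.0.5:6001',            # sender's connection identifier
#               'message': MessageGossipAnnounce(...)}    # any gossip message instance
#
#   outgoing = {'type': QUEUE_ITEM_TYPE_SEND_MESSAGE,
#               'identifier': '10.0.0.7:6001',            # receiving connection
#               'message': MessageGossipPeerRequest(...)}
#
#   connect = {'type': QUEUE_ITEM_TYPE_ESTABLISH_CONNECTION,
#              'identifier': 'host:port'}                 # no 'message' key is attached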
|
python
|
import unittest
import pyarrow
import pymarrow
import pandas as pd
class TestPyMarrow(unittest.TestCase):
def test_add_index(self):
batch = pyarrow.RecordBatch.from_arrays([
[5, 4, 3, 2, 1],
[1, 2, 3, 4, 5]
], ["a", "b"])
actual = pymarrow.add_index(batch, ["a"])
expected = pyarrow.RecordBatch.from_arrays([
pyarrow.array([4, 3, 2, 1, 0], pyarrow.int8()),
[5, 4, 3, 2, 1],
[1, 2, 3, 4, 5]
], ["__marrow_index", "a", "b"], metadata={"_marrow:index": "a"})
pd.testing.assert_frame_equal(actual.to_pandas(), expected.to_pandas())
self.assertTrue(actual.equals(expected))
def test_sort(self):
batch = pyarrow.RecordBatch.from_arrays([
[5, 4, 3, 2, 1],
[1, 2, 3, 4, 5]
], ["a", "b"])
actual = pymarrow.sort(batch, ["a"])
expected = pyarrow.RecordBatch.from_arrays([
[1, 2, 3, 4, 5],
[5, 4, 3, 2, 1]
], ["a", "b"], metadata={"_marrow:index": "a"})
pd.testing.assert_frame_equal(actual.to_pandas(), expected.to_pandas())
self.assertTrue(actual.equals(expected))
def test_merge(self):
batch1 = pyarrow.RecordBatch.from_arrays([
[1, 1, 2, 3, 4, 5],
[6, 5, 4, 3, 2, 1]
], ["a", "b"], metadata={"_marrow:index": "a"})
batch2 = pyarrow.RecordBatch.from_arrays([
[1, 2, 3, 4, 5, 5],
[5, 4, 3, 2, 1, 0]
], ["a", "c"], metadata={"_marrow:index": "a"})
actual = pymarrow.merge(batch1, batch2, on=["a"], how="inner")
expected = pyarrow.RecordBatch.from_arrays([
[1, 1, 2, 3, 4, 5, 5],
[6, 5, 4, 3, 2, 1, 1],
[5, 5, 4, 3, 2, 1, 0]
], ["a", "b", "c"])
pd.testing.assert_frame_equal(actual.to_pandas(), expected.to_pandas())
self.assertTrue(actual.equals(expected))
if __name__ == '__main__':
unittest.main()
|
python
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from .forms import PDBForm
from .runVis import LoadModel, DOPE, HDXRepr, RepLabels, CustomRes
import urllib
d_obj = {
"1":RepLabels,
"2":HDXRepr,
"3":DOPE,
"4":CustomRes
}
d_desc = {
"1":"Selected residues are shown in red as ball and stick representation.",
"2":{
"folding":"Green: Early, Yellow: Intermediate, Red: Late.",
"stability":"Green: Strong, Yellow: Medium, Red: Weak."
},
"3":"Lighter residues indicate better DOPE scoring regions.",
"4":"Color gradient from white to dark red indicates low -> high scoring."
}
# Create your views here.
def index(request):
if request.method == 'GET':
context = {'form':PDBForm()}
return render(request,"visualise/home.html",context)
if request.method == 'POST':
form = PDBForm(request.POST, request.FILES)
print("Files")
print(request.FILES)
if form.is_valid():
input_data = form.cleaned_data
if 'data' not in request.FILES:
request.FILES['data']=None
obj = d_obj[input_data["choice"]](input_data['PDB'],request.FILES['data'])
url = obj.open_url(open_link=False,print_out=True,data_label=input_data["hdx_opt"])
print(len(url))
context = {'state':"\n".join(obj.state)}
if input_data["choice"] == "2":
desc = d_desc["2"][input_data["hdx_opt"]]
else:
desc = d_desc[input_data["choice"]]
try:
req = urllib.request.urlopen(url)
assert req.getcode()==200
req.close()
return render(request,"visualise/results.html",{'url':url,'desc':desc})
            except Exception:
filename = "%s_state.txt" % input_data['PDB']
content = "\n".join(obj.state)
# response = HttpResponse(content, content_type='text/plain')
response = render(request,"visualise/results.html",{'url':'https://www.ncbi.nlm.nih.gov/Structure/icn3d/full.html','desc':desc})
response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
print(response)
return response
return HttpResponse("Testing")
def help(request):
return render(request,"visualise/help.html")
def results(request):
return render(request,"visualise/results.html",{'url':'https://www.ncbi.nlm.nih.gov/Structure/icn3d/full.html'})
|
python
|
#!/usr/bin/env python
from webkitpy.benchmark_runner.generic_factory import GenericFactory
class HTTPServerDriverFactory(GenericFactory):
products = {}
|
python
|
# Copyright (c) 2019-2021, Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, Iterable, Tuple, Optional, List, Iterator
import abc
import warnings
from ezdxf.math import Vec3, Vec2
if TYPE_CHECKING:
from ezdxf.math import Vertex, AnyVec
__all__ = ["BoundingBox2d", "BoundingBox", "AbstractBoundingBox"]
class AbstractBoundingBox:
__slots__ = ("extmin", "extmax")
def __init__(self, vertices: Iterable["Vertex"] = None):
self.extmax: Optional["AnyVec"] = None
self.extmin: Optional["AnyVec"] = None
if vertices is not None:
try:
self.extmin, self.extmax = self.extends_detector(vertices)
except ValueError:
# No or invalid data creates an empty BoundingBox
pass
def copy(self):
box = self.__class__()
box.extmin = self.extmin
box.extmax = self.extmax
return box
def __str__(self) -> str:
return f"[{self.extmin}, {self.extmax}]"
def __repr__(self) -> str:
name = self.__class__.__name__
if self.has_data:
return f"{name}({self.__str__()})"
else:
return f"{name}()"
def __iter__(self) -> Iterator["AnyVec"]:
if self.has_data:
yield self.extmin
yield self.extmax
@abc.abstractmethod
def extends_detector(
self, vertices: Iterable["Vertex"]
) -> Tuple["AnyVec", "AnyVec"]:
pass
@property
@abc.abstractmethod
def is_empty(self) -> bool:
...
@abc.abstractmethod
def inside(self, vertex: "Vertex") -> bool:
...
@abc.abstractmethod
def has_intersection(self, other: "AbstractBoundingBox") -> bool:
...
@abc.abstractmethod
def has_overlap(self, other: "AbstractBoundingBox") -> bool:
...
@abc.abstractmethod
def intersection(
self, other: "AbstractBoundingBox"
) -> "AbstractBoundingBox":
...
def contains(self, other: "AbstractBoundingBox") -> bool:
"""Returns ``True`` if the `other` bounding box is completely inside
of this bounding box.
.. versionadded:: 0.17.2
"""
return self.inside(other.extmin) and self.inside(other.extmax)
def any_inside(self, vertices: Iterable["Vertex"]) -> bool:
"""Returns ``True`` if any vertex is inside this bounding box.
Vertices at the box border are inside!
"""
if self.has_data:
return any(self.inside(v) for v in vertices)
return False
def all_inside(self, vertices: Iterable["Vertex"]) -> bool:
"""Returns ``True`` if all vertices are inside this bounding box.
Vertices at the box border are inside!
"""
if self.has_data:
# all() returns True for an empty set of vertices
has_any = False
for v in vertices:
has_any = True
if not self.inside(v):
return False
return has_any
return False
@property
    def has_data(self) -> bool:
        """Returns ``True`` if the bounding box has known limits."""
return self.extmin is not None
@property
def size(self):
"""Returns size of bounding box."""
return self.extmax - self.extmin
@property
def center(self):
"""Returns center of bounding box."""
return self.extmin.lerp(self.extmax)
def extend(self, vertices: Iterable["Vertex"]) -> None:
"""Extend bounds by `vertices`.
Args:
vertices: iterable of Vertex objects
"""
v = list(vertices)
if not v:
return
if self.has_data:
v.extend([self.extmin, self.extmax])
self.extmin, self.extmax = self.extends_detector(v)
def union(self, other: "AbstractBoundingBox"):
"""Returns a new bounding box as union of this and `other` bounding
box.
"""
vertices: List["AnyVec"] = []
if self.has_data:
vertices.extend(self)
if other.has_data:
vertices.extend(other)
return self.__class__(vertices)
def rect_vertices(self) -> Tuple[Vec2, ...]:
"""Returns the corners of the bounding box in the xy-plane as
:class:`Vec2` objects.
"""
if self.has_data: # extmin is not None!
x0, y0, *_ = self.extmin # type: ignore
x1, y1, *_ = self.extmax # type: ignore
return Vec2(x0, y0), Vec2(x1, y0), Vec2(x1, y1), Vec2(x0, y1)
else:
raise ValueError("empty bounding box")
    def grow(self, value: float) -> None:
        """Grow or shrink the bounding box by a uniform value in x, y and
z-axis. A negative value shrinks the bounding box.
Raises :class:`ValueError` for shrinking the size of the bounding box to
zero or below in any dimension.
"""
if self.has_data:
if value < 0.0:
min_ext = min(self.size)
if -value >= min_ext / 2.0:
raise ValueError("shrinking one or more dimensions <= 0")
self.extmax += Vec3(value, value, value) # type: ignore
self.extmin += Vec3(-value, -value, -value) # type: ignore
class BoundingBox(AbstractBoundingBox):
"""3D bounding box.
Args:
vertices: iterable of ``(x, y, z)`` tuples or :class:`Vec3` objects
"""
__slots__ = ("extmin", "extmax")
@property
def is_empty(self) -> bool:
"""Returns ``True`` if the bounding box is empty. The bounding box has a
size of 0 in any or all dimensions or is undefined.
"""
if self.has_data:
sx, sy, sz = self.size
return sx * sy * sz == 0.0
return True
def extends_detector(
self, vertices: Iterable["Vertex"]
) -> Tuple[Vec3, Vec3]:
return extends3d(vertices)
def inside(self, vertex: "Vertex") -> bool:
"""Returns ``True`` if `vertex` is inside this bounding box.
Vertices at the box border are inside!
"""
if self.extmin is None or self.extmax is None:
return False
x, y, z = Vec3(vertex).xyz
xmin, ymin, zmin = self.extmin.xyz
xmax, ymax, zmax = self.extmax.xyz
return (
(xmin <= x <= xmax) and (ymin <= y <= ymax) and (zmin <= z <= zmax)
)
def has_intersection(self, other: "AbstractBoundingBox") -> bool:
"""Returns ``True`` if this bounding box intersects with `other` but does
not include touching bounding boxes, see also :meth:`has_overlap`::
bbox1 = BoundingBox([(0, 0, 0), (1, 1, 1)])
bbox2 = BoundingBox([(1, 1, 1), (2, 2, 2)])
assert bbox1.has_intersection(bbox2) is False
"""
# Source: https://gamemath.com/book/geomtests.html#intersection_two_aabbs
# Check for a separating axis:
if (
self.extmin is None
or self.extmax is None
or other.extmin is None
or other.extmax is None
):
return False
o_min = Vec3(other.extmin) # could be a 2D bounding box
o_max = Vec3(other.extmax) # could be a 2D bounding box
# Check for a separating axis:
if self.extmin.x >= o_max.x:
return False
if self.extmax.x <= o_min.x:
return False
if self.extmin.y >= o_max.y:
return False
if self.extmax.y <= o_min.y:
return False
if self.extmin.z >= o_max.z:
return False
if self.extmax.z <= o_min.z:
return False
return True
def intersect(self, other: "AbstractBoundingBox") -> bool:
warnings.warn(
"intersect() is deprecated, replaced by has_intersection()",
DeprecationWarning,
)
return self.has_intersection(other)
def has_overlap(self, other: "AbstractBoundingBox") -> bool:
"""Returns ``True`` if this bounding box intersects with `other` but
in contrast to :meth:`has_intersection` includes touching bounding boxes too::
bbox1 = BoundingBox([(0, 0, 0), (1, 1, 1)])
bbox2 = BoundingBox([(1, 1, 1), (2, 2, 2)])
assert bbox1.has_overlap(bbox2) is True
.. versionadded:: 0.17.2
"""
# Source: https://gamemath.com/book/geomtests.html#intersection_two_aabbs
# Check for a separating axis:
if (
self.extmin is None
or self.extmax is None
or other.extmin is None
or other.extmax is None
):
return False
o_min = Vec3(other.extmin) # could be a 2D bounding box
o_max = Vec3(other.extmax) # could be a 2D bounding box
# Check for a separating axis:
if self.extmin.x > o_max.x:
return False
if self.extmax.x < o_min.x:
return False
if self.extmin.y > o_max.y:
return False
if self.extmax.y < o_min.y:
return False
if self.extmin.z > o_max.z:
return False
if self.extmax.z < o_min.z:
return False
return True
def overlap(self, other: "AbstractBoundingBox") -> bool:
warnings.warn(
"overlap() is deprecated, replaced by has_overlap()",
DeprecationWarning,
)
return self.has_overlap(other)
def cube_vertices(self) -> Tuple[Vec3, ...]:
"""Returns the 3D corners of the bounding box as :class:`Vec3` objects."""
if self.extmin is not None and self.extmax is not None:
x0, y0, z0 = self.extmin
x1, y1, z1 = self.extmax
return (
Vec3(x0, y0, z0),
Vec3(x1, y0, z0),
Vec3(x1, y1, z0),
Vec3(x0, y1, z0),
Vec3(x0, y0, z1),
Vec3(x1, y0, z1),
Vec3(x1, y1, z1),
Vec3(x0, y1, z1),
)
else:
raise ValueError("empty bounding box")
def intersection(self, other: "AbstractBoundingBox") -> "BoundingBox":
"""Returns the bounding box of the intersection cube of both
3D bounding boxes. Returns an empty bounding box if the intersection
volume is 0.
"""
new_bbox = self.__class__()
if not self.has_intersection(other):
return new_bbox
s_min_x, s_min_y, s_min_z = Vec3(self.extmin)
o_min_x, o_min_y, o_min_z = Vec3(other.extmin)
s_max_x, s_max_y, s_max_z = Vec3(self.extmax)
o_max_x, o_max_y, o_max_z = Vec3(other.extmax)
new_bbox.extend(
[
(
max(s_min_x, o_min_x),
max(s_min_y, o_min_y),
max(s_min_z, o_min_z),
),
(
min(s_max_x, o_max_x),
min(s_max_y, o_max_y),
min(s_max_z, o_max_z),
),
]
)
return new_bbox
class BoundingBox2d(AbstractBoundingBox):
"""Optimized 2D bounding box.
Args:
vertices: iterable of ``(x, y[, z])`` tuples or :class:`Vec3` objects
"""
__slots__ = ("extmin", "extmax")
@property
def is_empty(self) -> bool:
"""Returns ``True`` if the bounding box is empty. The bounding box has a
size of 0 in any or all dimensions or is undefined.
"""
if self.has_data:
sx, sy = self.size
return sx * sy == 0.0
return True
def extends_detector(
self, vertices: Iterable["Vertex"]
) -> Tuple[Vec2, Vec2]:
return extends2d(vertices)
def inside(self, vertex: "Vertex") -> bool:
"""Returns ``True`` if `vertex` is inside this bounding box.
Vertices at the box border are inside!
"""
if self.extmin is None or self.extmax is None:
return False
v = Vec2(vertex)
min_ = self.extmin
max_ = self.extmax
return (min_.x <= v.x <= max_.x) and (min_.y <= v.y <= max_.y)
def has_intersection(self, other: "AbstractBoundingBox") -> bool:
"""Returns ``True`` if this bounding box intersects with `other` but does
not include touching bounding boxes, see also :meth:`has_overlap`::
bbox1 = BoundingBox2d([(0, 0), (1, 1)])
bbox2 = BoundingBox2d([(1, 1), (2, 2)])
assert bbox1.has_intersection(bbox2) is False
"""
# Source: https://gamemath.com/book/geomtests.html#intersection_two_aabbs
if (
self.extmin is None
or self.extmax is None
or other.extmin is None
or other.extmax is None
):
return False
# Check for a separating axis:
if self.extmin.x >= other.extmax.x:
return False
if self.extmax.x <= other.extmin.x:
return False
if self.extmin.y >= other.extmax.y:
return False
if self.extmax.y <= other.extmin.y:
return False
return True
def intersect(self, other: "AbstractBoundingBox") -> bool:
warnings.warn(
"intersect() is deprecated, replaced by has_intersection()",
DeprecationWarning,
)
return self.has_intersection(other)
def intersection(self, other: "AbstractBoundingBox") -> "BoundingBox2d":
"""Returns the bounding box of the intersection rectangle of both
2D bounding boxes. Returns an empty bounding box if the intersection
area is 0.
"""
new_bbox = self.__class__()
if not self.has_intersection(other):
return new_bbox
s_min_x, s_min_y = Vec2(self.extmin)
o_min_x, o_min_y = Vec2(other.extmin)
s_max_x, s_max_y = Vec2(self.extmax)
o_max_x, o_max_y = Vec2(other.extmax)
new_bbox.extend(
[
(max(s_min_x, o_min_x), max(s_min_y, o_min_y)),
(min(s_max_x, o_max_x), min(s_max_y, o_max_y)),
]
)
return new_bbox
def has_overlap(self, other: "AbstractBoundingBox") -> bool:
"""Returns ``True`` if this bounding box intersects with `other` but
in contrast to :meth:`has_intersection` includes touching bounding boxes too::
bbox1 = BoundingBox2d([(0, 0), (1, 1)])
bbox2 = BoundingBox2d([(1, 1), (2, 2)])
assert bbox1.has_overlap(bbox2) is True
.. versionadded:: 0.17.2
"""
# Source: https://gamemath.com/book/geomtests.html#intersection_two_aabbs
if (
self.extmin is None
or self.extmax is None
or other.extmin is None
or other.extmax is None
):
return False
# Check for a separating axis:
if self.extmin.x > other.extmax.x:
return False
if self.extmax.x < other.extmin.x:
return False
if self.extmin.y > other.extmax.y:
return False
if self.extmax.y < other.extmin.y:
return False
return True
def overlap(self, other: "AbstractBoundingBox") -> bool:
warnings.warn(
"overlap() is deprecated, replaced by has_overlap()",
DeprecationWarning,
)
return self.has_overlap(other)
def extends3d(vertices: Iterable["Vertex"]) -> Tuple[Vec3, Vec3]:
minx, miny, minz = None, None, None
maxx, maxy, maxz = None, None, None
for v in vertices:
v = Vec3(v)
if minx is None:
minx, miny, minz = v.xyz # type: ignore
maxx, maxy, maxz = v.xyz # type: ignore
else:
x, y, z = v.xyz
if x < minx:
minx = x
elif x > maxx:
maxx = x
if y < miny:
miny = y
elif y > maxy:
maxy = y
if z < minz:
minz = z
elif z > maxz:
maxz = z
if minx is None:
        raise ValueError("No vertices given.")
return Vec3(minx, miny, minz), Vec3(maxx, maxy, maxz)
def extends2d(vertices: Iterable["Vertex"]) -> Tuple[Vec2, Vec2]:
minx, miny = None, None
maxx, maxy = None, None
for v in vertices:
v = Vec2(v)
x, y = v.x, v.y # type: ignore
if minx is None:
minx = x
maxx = x
miny = y
maxy = y
else:
if x < minx:
minx = x
elif x > maxx:
maxx = x
if y < miny:
miny = y
elif y > maxy:
maxy = y
if minx is None:
        raise ValueError("No vertices given.")
return Vec2(minx, miny), Vec2(maxx, maxy)
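# A short usage sketch (illustrative only, not part of the module above):
#
#   bbox = BoundingBox([(0, 0, 0), (10, 10, 10)])
#   bbox.extend([(15, 5, 5)])                      # extmax grows to (15, 10, 10)
#   assert bbox.inside((5, 5, 5))                  # vertices on the border count as inside
#   other = BoundingBox([(8, 8, 8), (20, 20, 20)])
#   assert bbox.has_intersection(other)
#   overlap = bbox.intersection(other)             # BoundingBox([(8, 8, 8), (15, 10, 10)])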
|
python
|
#
# Author: Robert Abram <[email protected]>
#
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
#
# Signal handlers for model change events, see: proj.settings.appconfig.
# These are a great way to log user activity.
#
from datetime import datetime
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from proj.middleware import get_current_user
# These are apps that should not be remotely logged
LOGGING_EXCLUDED_APPS = (
'auth',
'axes',
'oauth2_provider',
)
# These are models that should not be remotely logged
LOGGING_EXCLUDED_MODELS = (
'AccessLog',
'User',
'SystemActions',
)
# These are model fields that should not be remotely logged
LOGGING_EXCLUDED_FIELDS = (
'id',
'http_passwd',
)
def signal_model_pre_save(sender, instance, **kwargs):
pass
def signal_model_post_save(sender, instance, **kwargs):
pass
def signal_model_pre_delete(sender, instance, **kwargs):
pass
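# A hedged sketch of how these handlers are usually wired up (the real project does this
# in proj.settings.appconfig; the AppConfig class below is illustrative only):
#
#   from django.apps import AppConfig
#   from django.db.models.signals import pre_save, post_save, pre_delete
#
#   class ProjConfig(AppConfig):
#       name = 'proj'
#
#       def ready(self):
#           for model in apps.get_models():
#               pre_save.connect(signal_model_pre_save, sender=model)
#               post_save.connect(signal_model_post_save, sender=model)
#               pre_delete.connect(signal_model_pre_delete, sender=model)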
|
python
|
from common import *
import collections
import numpy as np
def test_astype(ds_local):
ds = ds_local
ds_original = ds.copy()
#ds.columns['x'] = (ds.columns['x']*1).copy() # convert non non-big endian for now
ds['x'] = ds['x'].astype('f4')
assert ds.x.evaluate().dtype == np.float32
assert ds.x.tolist() == ds_original.x.as_numpy().evaluate().astype(np.float32).tolist()
def test_astype_str():
df = vaex.from_arrays(x=['10,010', '-50,0', '11,111'])
df['x'] = df['x'].str.replace(',', '').evaluate()
df['x'] = (df['x'].astype('float')).astype('int64').evaluate()
assert df.columns['x'].dtype == np.int64
assert df.x.dtype == np.int64
def test_astype_dtype():
df = vaex.from_arrays(x=[0, 1])
assert df.x.astype(str).data_type() in [pa.string(), pa.large_string()]
df = vaex.from_arrays(x=[np.nan, 1])
# assert df.x.astype(str).dtype == vaex.column.str_type
assert df.x.astype(str).data_type() in [pa.string(), pa.large_string()]
|
python
|
""" File: P3_semi_supervised_topic_modeling.py
Description: Loads a previously created pre-processed chat corpus, then performs
semi-supervised topic modeling utilizing CorEx and GuidedLDA.
INPUT FILES:
0) anchors.txt - anchor/seed words each on their own line
Previously created preprocessed chat corpus from either:
1) wholeChatsFilePOS_N_ADJ_V.txt -- preprocessing keeping nouns, adjectives, and verbs
2) wholeChatsFilePOS_N_ADJ.txt -- preprocessing keeping nouns and adjectives
3) wholeChatsFile.txt -- NO POS preprocessing so all parts of speech
4) onlyQuestionsFile.txt -- Only initial question of chats
OUTPUT FILES:
1) "raw_" text (.txt) file listing topics with each word scored
2) "LDA_" text (.txt) file containing only the text for the
specified number of topics with the specified number of words per topic
Acknowledgements:
    Here we used the CorEx (Correlation Explanation) package, available on GitHub:
https://github.com/gregversteeg/corex_topic
    Here we used the GuidedLDA package, available on GitHub:
https://github.com/vi3k6i5/GuidedLDA
NOTE: We had difficulty installing GuidedLDA, but we were finally successful
by following the work-around posted at:
https://github.com/dex314/GuidedLDA_WorkAround
"""
import os.path
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from corextopic import corextopic as ct
import pandas as pd
import nltk
from time import time
import re, sys, random, math
import numpy as np
from lda import guidedlda as glda
from lda import glda_datasets as gldad
from collections import Counter
from timeit import default_timer as timer
from P2_utility_functions import *
from P3_utility_functions import *
def main():
print('Welcome to Phase 3 which runs the semi-supervised topic modeling techniques.',
'\n\nYou should have first run Phase 1 to pre-process your chat data.',
'\nIt would generate cleaned chat files varying the parts of speech or question-only.',
'\nFiles generated are: wholeChatsFile.txt, wholeChatsFilePOS_N_ADJ_V.txt,',
'\nwholeChatsFilePOS_N_ADJ.txt, and onlyQuestionsFile.txt.\n\n')
print('\n\nYou could have also run Phase 2 to execute unsupervised topic modeling techniques.',
'\nIt would generate files: possible_2_word_anchors.txt and possible_3_word_anchors.txt which',
'\nyou might use to create a text-file (.txt) with anchors one per line.\n')
prompt = "\nStep 1. Please input the pre-processed (.txt) file." + \
'\n(For example: "wholeChatsFile.txt"):'
fileName = getFileName(prompt)
chats = readChatCorpusFile(fileName)
prompt = "\nStep 2. Please input the anchors/seeds (.txt) file." + \
'\n(For example: "anchors.txt"):'
fileName = getFileName(prompt)
anchorList = readAnchorsFile(fileName)
modelDict = {'GuidedLDA':run_GuidedLDA,'CorEx':run_CorEx}
n_topics = getPositiveInteger('\nStep 3. Please specify the number of topics. (suggested range 10-20)\n')
    n_words_per_topic = getPositiveInteger('\nStep 4. Please specify the number of words per topic. (suggested range 5-10)\n')
for model in modelDict:
print("="*35)
        print("\nPerforming", model, "topic modeling -- please wait, it might take a couple of minutes!")
topicList = modelDict[model](chats, anchorList, n_topics, n_words_per_topic)
averagePMI, averageLCP, averageNZ = calculateTopicCoherenceMetrics(chats, topicList)
print("\nResults for",model," TC-PMI %3.3f, TC-LCP %3.3f, TC-NZ %3.3f:" % (averagePMI, averageLCP, averageNZ))
for topic in topicList:
print(topic)
main()
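# Illustrative anchors.txt layout (per the docstring above, one anchor/seed word per line):
#
#   printer
#   login
#   database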
|
python
|
# ==============================================================================
# Imports
# ==============================================================================
import numpy as np
import os, glob
from tqdm import tqdm as tqdm
import tensorflow.compat.v1 as tf
tfq = tf.quantization
import tensorflow_probability as tfp
tfd = tfp.distributions
from binary_io import to_bit_string, from_bit_string
from misc import stateless_normal_sample
# ==============================================================================================
# ==============================================================================================
# ==============================================================================================
#
# Greedy Sampling
#
# ==============================================================================================
# ==============================================================================================
# ==============================================================================================
def code_greedy_sample(t_loc,
t_scale,
p_loc,
p_scale,
n_bits_per_step,
n_steps,
seed,
rho=1.):
n_samples = int(2**n_bits_per_step)
# The scale divisor needs to be square rooted because
# we are dealing with standard deviations and not variances
scale_divisor = np.sqrt(n_steps)
proposal_shard = tfd.Normal(loc=p_loc / n_steps,
scale=rho * p_scale / scale_divisor)
target = tfd.Normal(loc=t_loc,
scale=t_scale)
# Setup greedy sampler for loop
def loop_step(i, sample_index, best_sample):
samples = stateless_normal_sample(loc=proposal_shard.loc,
scale=proposal_shard.scale,
num_samples=n_samples,
seed=1000 * seed + i)
test_samples = tf.tile(tf.expand_dims(best_sample, 0), [n_samples, 1]) + samples
log_probs = tf.reduce_sum(target.log_prob(test_samples), axis=1)
index = tf.argmax(log_probs)
best_sample = test_samples[index, :]
return [i + 1, tf.concat((sample_index, [index]), axis=0), best_sample]
i = tf.constant(0)
best_sample = tf.zeros(tf.shape(p_loc), dtype=tf.float32)
sample_index = tf.cast([], tf.int32)
cond = lambda i, sample_index, best_sample: i < n_steps
_, sample_index, best_sample = tf.while_loop(cond=cond,
body=loop_step,
loop_vars=[i, sample_index, best_sample],
shape_invariants=[i.get_shape(),
tf.TensorShape([None]),
best_sample.get_shape()])
sample_index = tf.map_fn(lambda x: tf.numpy_function(to_bit_string, [x, n_bits_per_step], tf.string),
sample_index,
dtype=tf.string)
sample_index = tf.numpy_function(lambda indices: ''.join([ind.decode('utf-8') for ind in indices]),
[sample_index],
tf.string)
return best_sample, sample_index
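# Note on code length (added for clarity): each of the n_steps greedy steps selects one of
# 2**n_bits_per_step candidate shards, so the index string returned above is exactly
# n_bits_per_step * n_steps bits long -- the same quantity used as n_bits_per_group by the
# grouped coder below.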
def decode_greedy_sample(sample_index,
p_loc,
p_scale,
n_bits_per_step,
n_steps,
seed,
rho=1.):
# Perform a for loop for the below list comprehension
#
# indices = [from_bit_string(sample_index[i:i + n_bits_per_step])
# for i in range(0, n_bits_per_step * n_steps, n_bits_per_step)]
#
i = tf.constant(0, tf.int32)
indices = tf.cast([], tf.int32)
cond = lambda i, indices: i < n_bits_per_step * n_steps
def index_loop_step(i, indices):
index = tf.numpy_function(from_bit_string,
[tf.strings.substr(sample_index, i, n_bits_per_step)],
tf.int64)
index = tf.cast(index, tf.int32)
return [i + n_bits_per_step, tf.concat((indices, [index]), axis=0)]
_, indices = tf.while_loop(cond=cond,
body=index_loop_step,
loop_vars=[i, indices],
shape_invariants=[i.get_shape(),
tf.TensorShape([None])])
# ---------------------------------------------------------------------
    # Recover the sample
# ---------------------------------------------------------------------
# The scale divisor needs to be square rooted because
# we are dealing with standard deviations and not variances
scale_divisor = np.sqrt(n_steps)
proposal_shard = tfd.Normal(loc=p_loc / n_steps,
scale=rho * p_scale / scale_divisor)
n_samples = int(2**n_bits_per_step)
# Loop variables
i = tf.constant(0, tf.int32)
sample = tf.zeros(tf.shape(p_loc), dtype=tf.float32)
# Loop condition
cond = lambda i, indices: i < n_steps
# Loop body
def sample_loop_step(i, sample):
samples = tf.tile(tf.expand_dims(sample, 0), [n_samples, 1])
samples = samples + stateless_normal_sample(loc=proposal_shard.loc,
scale=proposal_shard.scale,
num_samples=n_samples,
seed=1000 * seed + i)
return [i + 1, samples[indices[i], :]]
# Run loop
_, sample = tf.while_loop(cond=cond,
body=sample_loop_step,
loop_vars=[i, sample],
shape_invariants=[i.get_shape(),
sample.get_shape()])
return sample
def code_grouped_greedy_sample(sess,
target,
proposal,
n_steps,
n_bits_per_step,
seed,
max_group_size_bits=12,
adaptive=True,
backfitting_steps=0,
use_log_prob=False,
rho=1.):
# Make sure the distributions have the correct type
if target.dtype is not tf.float32:
raise Exception("Target datatype must be float32!")
if proposal.dtype is not tf.float32:
raise Exception("Proposal datatype must be float32!")
n_bits_per_group = n_bits_per_step * n_steps
num_dimensions = sess.run(tf.reduce_prod(tf.shape(proposal.loc)))
    # the proposal standardized by itself reduces to zero mean and unit scale
p_loc = sess.run(tf.reshape(tf.zeros_like(proposal.loc), [-1]))
p_scale = sess.run(tf.reshape(tf.ones_like(proposal.scale), [-1]))
# rescale target by the proposal
t_loc = sess.run(tf.reshape((target.loc - proposal.loc) / proposal.scale, [-1]))
t_scale = sess.run(tf.reshape(target.scale / proposal.scale, [-1]))
kl_divergences = tf.reshape(tfd.kl_divergence(target, proposal), [-1])
# ======================================================================
# Preprocessing step: determine groups for sampling
# ======================================================================
group_start_indices = [0]
group_kls = []
kl_divs = sess.run(kl_divergences)
total_kl_bits = np.sum(kl_divs) / np.log(2)
    print("Total KL to split up: {:.2f} bits, "
          "maximum bits per group: {}, "
          "estimated number of groups: {}, "
          "coding {} dimensions".format(total_kl_bits,
n_bits_per_group,
total_kl_bits // n_bits_per_group + 1,
num_dimensions
))
current_group_size = 0
current_group_kl = 0
n_nats_per_group = n_bits_per_group * np.log(2) - 1
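    # Note: KL divergences are computed in nats; dividing by ln(2) converts them to bits,
    # so a budget of n_bits_per_group bits corresponds to n_bits_per_group * ln(2) nats
    # (one nat of slack is subtracted above as a safety margin).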
for idx in range(num_dimensions):
group_bits = np.log(current_group_size + 1) / np.log(2)
if group_bits >= max_group_size_bits or \
current_group_kl + kl_divs[idx] >= n_nats_per_group or \
idx == num_dimensions - 1:
group_start_indices.append(idx)
group_kls.append(current_group_kl / np.log(2))
current_group_size = 1
current_group_kl = kl_divs[idx]
else:
current_group_kl += kl_divs[idx]
current_group_size += 1
# ======================================================================
# Sample each group
# ======================================================================
results = []
group_start_indices += [num_dimensions]
    # Build the greedy sampling op once before the loop to avoid repeated graph construction cost
# The length is variable, hence the shape is [None]
target_loc = tf.placeholder(tf.float32, shape=[None])
target_scale = tf.placeholder(tf.float32, shape=[None])
prop_loc = tf.placeholder(tf.float32, shape=[None])
prop_scale = tf.placeholder(tf.float32, shape=[None])
seed_feed = tf.placeholder(tf.int32)
greedy_op = code_greedy_sample(t_loc=target_loc,
t_scale=target_scale,
p_loc=prop_loc,
p_scale=prop_scale,
n_bits_per_step=n_bits_per_step,
n_steps=n_steps,
seed=seed_feed,
rho=rho)
for i in tqdm(range(len(group_start_indices) - 1)):
start_idx = group_start_indices[i]
end_idx = group_start_indices[i + 1]
result = sess.run(greedy_op, feed_dict={target_loc: t_loc[start_idx:end_idx],
target_scale: t_scale[start_idx:end_idx],
prop_loc: p_loc[start_idx:end_idx],
prop_scale: p_scale[start_idx:end_idx],
seed_feed: seed + i})
results.append(result)
samples, codes = zip(*results)
bitcode = ''.join([c.decode('utf-8') for c in codes])
sample = tf.concat(samples, axis=0)
# Rescale the sample
sample = tf.reshape(proposal.scale, [-1]) * sample + tf.reshape(proposal.loc, [-1])
sample = sess.run(sample)
return sample, bitcode, group_start_indices
def decode_grouped_greedy_sample(sess,
bitcode,
group_start_indices,
proposal,
n_bits_per_step,
n_steps,
seed,
adaptive=True,
rho=1.):
# Make sure the distributions have the correct type
if proposal.dtype is not tf.float32:
raise Exception("Proposal datatype must be float32!")
n_bits_per_group = n_bits_per_step * n_steps
num_dimensions = sess.run(tf.reduce_prod(tf.shape(proposal.loc)))
# ======================================================================
# Decode each group
# ======================================================================
samples = []
group_start_indices += [num_dimensions]
p_loc = sess.run(tf.reshape(tf.zeros_like(proposal.loc), [-1]))
p_scale = sess.run(tf.reshape(tf.ones_like(proposal.scale), [-1]))
# Placeholders
sample_index = tf.placeholder(tf.string)
prop_loc = tf.placeholder(tf.float32, shape=[None])
prop_scale = tf.placeholder(tf.float32, shape=[None])
seed_feed = tf.placeholder(tf.int32)
# Get decoding op
decode_greedy_op = decode_greedy_sample(sample_index=sample_index,
p_loc=prop_loc,
p_scale=prop_scale,
n_bits_per_step=n_bits_per_step,
n_steps=n_steps,
seed=seed_feed,
rho=rho)
for i in tqdm(range(len(group_start_indices) - 1)):
if bitcode[n_bits_per_group * i: n_bits_per_group * (i + 1)] == '':
break
samp = sess.run(decode_greedy_op, feed_dict = {
sample_index: bitcode[n_bits_per_group * i: n_bits_per_group * (i + 1)],
prop_loc: p_loc[group_start_indices[i]:group_start_indices[i + 1]],
prop_scale: p_scale[group_start_indices[i]:group_start_indices[i + 1]],
seed_feed: seed + i
})
samples.append(samp)
sample = tf.concat(samples, axis=0)
# Rescale the sample
sample = tf.reshape(proposal.scale, [-1]) * sample + tf.reshape(proposal.loc, [-1])
return sess.run(sample)
|
python
|
from .features import Dictionary, RegexMatches, Stemmed, Stopwords
name = "portuguese"
try:
import enchant
dictionary = enchant.Dict("pt")
except enchant.errors.DictNotFoundError:
raise ImportError("No enchant-compatible dictionary found for 'pt'. " +
"Consider installing 'myspell-pt'.")
dictionary = Dictionary(name + ".dictionary", dictionary.check)
"""
:class:`~revscoring.languages.features.Dictionary` features via
:class:`enchant.Dict` "pt". Provided by `myspell-pt`
"""
try:
from nltk.corpus import stopwords as nltk_stopwords
stopwords = set(nltk_stopwords.words('portuguese'))
except LookupError:
raise ImportError("Could not load stopwords for {0}. ".format(__name__) +
"You may need to install the nltk 'stopwords' " +
"corpora. See http://www.nltk.org/data.html")
stopwords = Stopwords(name + ".stopwords", stopwords)
"""
:class:`~revscoring.languages.features.Stopwords` features provided by
:func:`nltk.corpus.stopwords` "portuguese"
"""
try:
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("portuguese")
except ValueError:
raise ImportError("Could not load stemmer for {0}. ".format(__name__))
stemmed = Stemmed(name + ".stemmed", stemmer.stem)
"""
:class:`~revscoring.languages.features.Stemmed` word features via
:class:`nltk.stem.snowball.SnowballStemmer` "portuguese"
"""
badword_regexes = [
r"baba[ckq](as?|ão|ões|u?i[cçs]s?e)", # douchebag
r"bi(ch|x)as?", # gay man
r"boio(l[ai](tico)?|l[aã]o|lo(go|[gj]i[sx]ta))s?", # gay man
r"bo(qu|k)etes?", # blowjob
r"bo[sx]t(ao?s?|alhao?)", # shit
r"b[uo]s?[cçs]s?et+(a[os]?|inha)?", # pussy (vagina)
r"bu[mn]d((inh)?as?|[ãa]o)", # ass
r"b[uo]rr[oaei](ce|[ius])?", # donkey/jackass
r"[ck]a[csç]s?ete?s?", # bludgeon
# shit
r"[ck]ag(a(r|n?do|dao?|n?ei(r[ao])?|(lh)?a?o|nitas?|dela|lhoto)?|ou)",
r"[ck]ara(l?hl?([ou]?s?|ao|inh[ou]s?)|i([ou]s?)?)", # fuck
r"(ch|x)at[ao]s?", # boring
r"(ch|x)up[aeiou]([dv]a|te|nha|ndo|r|u)?", # blow me
r"[ck]o[ck]ô", # poo
r"[ck]om(er?|i)", # fucked
r"[ck]onas?", # cunt
r"[ck]uz([aã]o|inho)", # asshole
r"doid(inh)?[ao]s?", # crazy
r"fed?(id?[ao]|e|orent[ao])s?", # stinks/stinky
r"fei[ao]s?", # ugly
r"fendi", # ???
r"f[ou]d(a[os]?|e[ru]?|idos?)", # fuck
r"go[sx]tos([ao]s?|ão|ões|onas?)", # hot
r"idiot(a|i[cçs]s?e)s?", # idiot
r"lo(k[oa]s?|u[ck]([oa]s?|ura|a(mente)?))", # crazy
r"maconheir[ao]s?", # bothead
r"m[áa]fia", # mafia
r"maldizentes", # slanderers
r"mecos", # cum ???
r"mentir(a|os[oa])s?", # lie/liar
r"merd(a|[ãa]o|oso|ica)s?", # shit
r"noob", # noob
r"ot[áa]ri[oa]s?", # sucker
r"pari[ou]", # part of "puta que o pariu"
r"pategos", # hick / yokel
r"pau", # dick
r"peid([ao]|[ãa]o|ei|ar(ia)?|ando|aç[oa])s?", # fart
r"p[êe]nis+", # penis
r"pilas?", # dick
r"piroca", # dick
r"porcaria", r"porn[ôo]?", # filth/porn
r"po(rr|h)a", # cum
r"pum", # fart
r"punhet(a|eir[oa])s?", # jack off / masturbate
r"put([ao]|[ao]na|aria|eiro|inha)s?", # bitch/hooker
r"safad([ao]|ona)s?", # shameless
r"te[sz]ão", r"te[sz]ud[oa]s?", # turn-on / horny
r"tran[sz]([aá](r(am)?|n?do)?|ou)", # sex
r"tretas?", # bullshit
r"trou?(ch|x)as?",
r"vadi([ao]s?|agem)", # bitch
r"viad(agem?|[aã]?o|inh[ou])s?", # gay person ("fucker")
r"xixi" # pee
]
badwords = RegexMatches(name + ".badwords", badword_regexes)
"""
:class:`~revscoring.languages.features.RegexMatches` features via a list of
badword detecting regexes.
"""
informal_regexes = [
r"adoro", # love
r"aki", # here
r"amo", # master
r"(?:b+?l+?a+?h*)+", # bla, blah, bbblllaaaahhhhhblah
r"carambas?", # OMG
r"coco", # coconut
r"copie[im]", # I copied
r"delicia", # delicious
r"editei", # edited
r"enfiar?", # to stick (up one's ass)
r"entao", # then
r"estrag(ar|uem)", # spoiled / ruined
r"fixe", # cool
r"gajo", # dude
r"h[aiou](h[aeiou])*", r"h[e](h[aeiou])+", # hi, ha, hehe, hohoho
r"k+?", # k, kkkkkkkkkkkkkkk
r"lindo", # pretty
r"l+[uo][uol]*l", # lol, LOLOLOL, LLLLoOOoLLL
r"mae", # mom
r"mto", # very
r"naum", # no (slang)
r"n[óo]is", # it's us (slang)
r"odeio", # hate
r"oi+", # hi
r"ol[aá]", # hello
r"ratas?", # "rat" -- a snitch
r"(?:rs)+", # lol
r"tava", # was / were (slang)
r"tbm", # also (slang)
r"vao", # vain
r"vcs", r"voce", r"voces", # you
r"xau" # bye
]
informals = RegexMatches(name + ".informals", informal_regexes)
"""
:class:`~revscoring.languages.features.RegexMatches` features via a list of
informal word detecting regexes.
"""
|
python
|
# --------------------------------------------------------------------
# Directory syncer by Alexander Sirotin (c) 2016
# Originally created for syncing between home NAS backup and Amazon cloud
# Both are mounted on the host machine (Amazon cloud is mounted using acd_cli)
# This program comes without any warranty, use it at your own risk.
# Feel free to contact me at [email protected]
# --------------------------------------------------------------------
import os
import sys
import filecmp
import logging
import argparse
import shutil
class DirectorySyncer:
# Note: Recursive function, goes over all the sub-directories as well
def __compareTwoDirectories(self, left, right):
logging.debug("Comparing between '%s' and '%s'" % (left, right))
# Make sure both directories exists
if not os.path.exists(left) or not os.path.isdir(left):
            raise Exception("Provided left directory '%s' does not exist or is not a directory!" % left)
if not os.path.exists(right) or not os.path.isdir(right):
            raise Exception("Provided right directory '%s' does not exist or is not a directory!" % right)
# Compare the two directories and create two lists containing the missing parts
result = filecmp.dircmp(left, right)
leftOnly = self.__removeSpecial(result.left_only)
rightOnly = self.__removeSpecial(result.right_only)
# Add full path to the elements
leftOnly = self.__convertToFullPath(left, leftOnly)
rightOnly = self.__convertToFullPath(right, rightOnly)
# Go over all the files and make sure that their sizes match
commonFiles = self.__removeSpecial(result.common_files)
for file in commonFiles:
leftPath = os.path.join(left, file)
leftFileSize = os.path.getsize(leftPath)
rightPath = os.path.join(right, file)
rightFileSize = os.path.getsize(rightPath)
if leftFileSize > rightFileSize:
                logging.warning("Problem found: Size of '%s' (%s) is bigger than '%s' (%s)" % (leftPath, self.__formatDiskSpace(leftFileSize), rightPath, self.__formatDiskSpace(rightFileSize)))
leftOnly.append(leftPath)
elif rightFileSize > leftFileSize:
                logging.warning("Problem found: Size of '%s' (%s) is bigger than '%s' (%s)" % (rightPath, self.__formatDiskSpace(rightFileSize), leftPath, self.__formatDiskSpace(leftFileSize)))
rightOnly.append(rightPath)
# Get common dirs for recursive call
dirs = self.__removeSpecial(result.common_dirs)
for dir in dirs:
childLeftOnly, childRightOnly = self.__compareTwoDirectories(os.path.join(left, dir), os.path.join(right, dir))
leftOnly.extend(childLeftOnly)
rightOnly.extend(childRightOnly)
return leftOnly, rightOnly
def __removeSpecial(self, list):
return [x for x in list if not x.startswith(".")]
def __convertToFullPath(self, basePath, list):
for i in range(len(list)):
list[i] = os.path.join(basePath, list[i])
return list
def __removeRootLocation(self, path, list):
n = len(path) + 1
for i in range(len(list)):
list[i] = list[i][n:]
return list
def __getSizeStr(self, path):
size = os.path.getsize(path)
if (os.path.isdir(path)):
size += self.__calculateDiskSpace(path, os.listdir(path))
return self.__formatDiskSpace(size)
def __calculateDiskSpace(self, path, list):
diskSpace = 0
for x in list:
fullX = os.path.join(path, x)
diskSpace += os.path.getsize(fullX)
if os.path.isdir(fullX):
content = [os.path.join(fullX, f) for f in os.listdir(fullX)]
diskSpace += self.__calculateDiskSpace(path, content)
return diskSpace
def __askYesNoQuestion(self, message):
yes = set(["yes", "y", ""])
no = set(["no", "n"])
while True:
sys.stdout.write("%s [Y/N] " % message)
            choice = input().lower()
if choice in yes:
return True
elif choice in no:
return False
else:
                print("Please respond with a valid answer.")
def __buildYesNoQuestion(self, fromPath, toPath, file):
f = os.path.join(fromPath, file)
t = os.path.join(toPath, file)
return self.__askYesNoQuestion("Copy from '%s' to '%s' (%s) ? " % (f, t, self.__getSizeStr(f)))
def __verboseSelectFromList(self, fromPath, toPath, list):
return [x for x in list if self.__buildYesNoQuestion(fromPath, toPath, x)]
    # Note: Recursive function, enters each directory and copies each file separately
def __copyMissingFiles(self, fromPath, toPath, list, dryRun):
for file in list:
src = os.path.join(fromPath, file)
dst = os.path.join(toPath, file)
# In case destination file exists, remove it...
if (not dryRun) and os.path.exists(dst):
os.remove(dst)
try:
if os.path.isdir(src):
# Create the destination directory
if not dryRun:
os.mkdir(dst)
# Recursive call to copy all directory content
recursiveList = os.listdir(src)
self.__copyMissingFiles(src, dst, recursiveList, dryRun)
else:
logging.info("Copying '%s' to '%s' (%s)" % (src, dst, self.__getSizeStr(src)))
if not dryRun:
shutil.copy(src, dst)
except Exception as e:
# In case of exception, we want to remove dst in order to avoid partially copied files
if not dryRun:
if os.path.isdir(dst):
shutil.rmtree(dst)
else:
os.remove(dst)
raise e
def __formatDiskSpace(self, space):
KB = 1024.0
MB = 1024 * KB
GB = 1024 * MB
if space < 10 * MB:
return "%.2f KB" % (space / KB)
if (space < GB):
return "%.2f MB" % (space / MB)
return "%.2f GB" % (space / GB)
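    # Worked examples (illustrative): 3 * 1024**2 bytes -> "3072.00 KB" (everything below
    # 10 MB is reported in KB), 500 * 1024**2 -> "500.00 MB", 5 * 1024**3 -> "5.00 GB".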
def __showNeededDiskSpace(self, pointA, pointB, leftOnly, rightOnly):
logging.info("Needed disk space for sync point '%s' is %s" % (pointA, self.__formatDiskSpace(self.__calculateDiskSpace(pointB, rightOnly))))
logging.info("Needed disk space for sync point '%s' is %s" % (pointB, self.__formatDiskSpace(self.__calculateDiskSpace(pointA, leftOnly))))
def sync(self, pointA, pointB, dryRun=False, verbose=False):
if dryRun:
            logging.warning("DRY-RUN - No actual copies will occur !!!")
logging.info("Syncing between '%s' and '%s'" % (pointA, pointB))
try:
# Create two lists contains the differences between the given points
leftOnly, rightOnly = self.__compareTwoDirectories(pointA, pointB)
leftOnlyLen = len(leftOnly)
rightOnlyLen = len(rightOnly)
logging.info("Found %d differences (%d are missing in '%s' and %d are missing in '%s')" % (leftOnlyLen + rightOnlyLen, rightOnlyLen, pointA, leftOnlyLen, pointB))
# Remove base path from results
leftOnly = self.__removeRootLocation(pointA, leftOnly)
rightOnly = self.__removeRootLocation(pointB, rightOnly)
# Show needed disk space
self.__showNeededDiskSpace(pointA, pointB, leftOnly, rightOnly)
# In case of verbose flag, ask the user what to do
if (not dryRun) and verbose:
leftOnly = self.__verboseSelectFromList(pointA, pointB, leftOnly)
rightOnly = self.__verboseSelectFromList(pointB, pointA, rightOnly)
# Show needed disk space
self.__showNeededDiskSpace(pointA, pointB, leftOnly, rightOnly)
# Recalculate number of differences
leftOnlyLen = len(leftOnly)
rightOnlyLen = len(rightOnly)
logging.info("Start processing %d differences (%d are missing in '%s' and %d are missing in '%s')" % (leftOnlyLen + rightOnlyLen, rightOnlyLen, pointA, leftOnlyLen, pointB))
self.__copyMissingFiles(pointA, pointB, leftOnly, dryRun)
self.__copyMissingFiles(pointB, pointA, rightOnly, dryRun)
logging.info("Done!")
except Exception as e:
logging.error(e.args[0])
return False
return True
def configure():
# Configure the logger
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s", datefmt="%d/%m/%Y %H:%M:%S", level=logging.INFO)
# Read the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--left", required=True)
parser.add_argument("-r", "--right", required=True)
parser.add_argument("-d", "--dry_run", action="store_const", const=True, default=False);
parser.add_argument("-v", "--verbose", action="store_const", const=True, default=False);
args = parser.parse_args()
# Return the arguments
return args
def main():
args = configure()
pointA = os.path.normpath(args.left)
pointB = os.path.normpath(args.right)
syncer = DirectorySyncer()
syncer.sync(pointA, pointB, dryRun=args.dry_run, verbose=args.verbose)
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@author: yuejl
@application:
@contact: [email protected]
@file: wechatApiConf.py
@time: 2021/7/1 0001 11:32
@desc:
'''
class WechatApiConfig:
def __init__(self):
self.url = None
self.init = None
|
python
|
#
# Copyright (c) 2018 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import attr
from plugincode.post_scan import PostScanPlugin
from plugincode.post_scan import post_scan_impl
from scancode import CommandLineOption
from scancode import POST_SCAN_GROUP
from summarycode import facet
# Tracing flags
TRACE = False
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, unicode) and a or repr(a) for a in args))
"""
A plugin to compute a licensing clarity score as designed in ClearlyDefined
"""
# minimum score to consider a license detection as good.
MIN_GOOD_LICENSE_SCORE = 80
@post_scan_impl
class LicenseClarityScore(PostScanPlugin):
"""
Compute a License clarity score at the codebase level.
"""
codebase_attributes = dict(license_score=attr.ib(default=attr.Factory(OrderedDict)))
sort_order = 110
options = [
CommandLineOption(('--license-clarity-score',),
is_flag=True,
default=False,
help='Compute a summary license clarity score at the codebase level.',
help_group=POST_SCAN_GROUP,
required_options=['classify', 'license', 'copyright'],
)
]
def is_enabled(self, license_clarity_score, **kwargs):
return license_clarity_score
def process_codebase(self, codebase, license_clarity_score, **kwargs):
if TRACE:
logger_debug('LicenseClarityScore:process_codebase')
scoring_elements = compute_license_score(codebase, **kwargs)
codebase.attributes.license_score.update(scoring_elements)
def compute_license_score(codebase, min_score=MIN_GOOD_LICENSE_SCORE, **kwargs):
"""
Return a mapping of scoring elements and a license clarity score computed at
the codebase level.
"""
score = 0
scoring_elements = OrderedDict(score=score)
# FIXME: separate the compute of each score element from applying the weights
############################################################################
top_level_declared_licenses_weight = 30
has_top_level_declared_licenses = get_top_level_declared_licenses(codebase, min_score)
scoring_elements['has_top_level_declared_licenses'] = bool(has_top_level_declared_licenses)
if has_top_level_declared_licenses:
score += top_level_declared_licenses_weight
if TRACE:
logger_debug(
'compute_license_score:has_top_level_declared_licenses:',
has_top_level_declared_licenses, 'score:', score)
############################################################################
file_level_license_and_copyright_weight = 25
file_level_license_and_copyright_coverage = 0
files_with_lic_copyr, files_count = get_other_licenses_and_copyrights_counts(codebase, min_score)
if TRACE:
logger_debug('compute_license_score:files_with_lic_copyr:',
files_with_lic_copyr, 'files_count:', files_count)
scoring_elements['file_level_license_and_copyright_coverage'] = 0
if files_count:
file_level_license_and_copyright_coverage = files_with_lic_copyr / files_count
score += int(file_level_license_and_copyright_coverage * file_level_license_and_copyright_weight)
scoring_elements['file_level_license_and_copyright_coverage'] = file_level_license_and_copyright_coverage
if TRACE:
logger_debug('compute_license_score:file_level_license_and_copyright_coverage:',
file_level_license_and_copyright_coverage, 'score:', score)
############################################################################
license_consistency_weight = 15
has_consistent_key_and_file_level_license = False
key_files_license_keys, other_files_license_keys = get_unique_licenses(codebase, min_score)
if key_files_license_keys and key_files_license_keys == other_files_license_keys:
has_consistent_key_and_file_level_license = True
scoring_elements['has_consistent_key_and_file_level_license'] = has_consistent_key_and_file_level_license
if has_consistent_key_and_file_level_license:
score += license_consistency_weight
if TRACE:
logger_debug(
'compute_license_score:has_consistent_key_and_file_level_license:',
has_consistent_key_and_file_level_license, 'score:', score)
############################################################################
spdx_standard_licenses_weight = 15
has_all_spdx_licenses = all(has_spdx_licenses(res) for res in codebase.walk() if res.is_file)
scoring_elements['has_all_spdx_licenses'] = has_all_spdx_licenses
if has_all_spdx_licenses:
score += spdx_standard_licenses_weight
if TRACE:
logger_debug(
'compute_license_score:',
'has_all_spdx_licenses:',
has_all_spdx_licenses, 'score:', score)
############################################################################
license_texts_weight = 15
all_keys = key_files_license_keys & other_files_license_keys
keys_with_license_text = get_detected_license_keys_with_full_text(codebase, min_score)
has_all_license_texts = all_keys == keys_with_license_text
scoring_elements['has_all_license_texts'] = has_all_license_texts
if has_all_license_texts:
score += license_texts_weight
scoring_elements['score'] = score
return scoring_elements
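# Worked example (illustrative, not part of the original code): with the weights
# above (30 + 25 + 15 + 15 + 15 = 100), a codebase with top-level declared
# licenses, a file-level license/copyright coverage of 0.8, consistent key/other
# license keys, all-SPDX licenses and full license texts would score
# 30 + int(0.8 * 25) + 15 + 15 + 15 = 95.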
def get_top_level_declared_licenses(codebase, min_score=MIN_GOOD_LICENSE_SCORE):
"""
A project has specific key file(s) at the top level of its code hierarchy
such as LICENSE, NOTICE or similar (and/or a package manifest) containing
structured license information such as an SPDX license expression or SPDX
license identifier, and the file(s) contain "clearly defined" declared
license information (a license declaration such as a license expression
and/or a series of license statements or notices).
Note: this ignores facets.
"""
key_files = (res for res in codebase.walk(topdown=True) if is_key_file(res))
detected_good_licenses = []
for resource in key_files:
if resource.scan_errors:
continue
# TODO: should we also ignore or penalize non SPDX licenses?
for detected_license in resource.licenses:
"""
"licenses": [
{
"score": 23.0,
"start_line": 1,
"end_line": 1,
"matched_rule": {
"identifier": "lgpl-2.1_38.RULE",
"license_expression": "lgpl-2.1",
"licenses": [
"lgpl-2.1"
]
},
"""
if detected_license['score'] < min_score:
continue
items = ('path', resource.path,)
items += tuple((k, v) for k, v in detected_license.items()
if (
k in ('score', 'start_line', 'end_line', 'matched_rule',)
)
)
detected_good_licenses.append(items)
return detected_good_licenses
def is_key_file(resource):
"""
Return True if a Resource is considered as a "key file".
"""
return (
resource.is_file
and resource.is_top_level
and (resource.is_readme
or resource.is_legal
or resource.is_manifest)
)
def is_core_facet(resource, core_facet=facet.FACET_CORE):
"""
Return True if the resource is in the core facet.
If we do not have facets, everything is considered as being core by default.
"""
has_facets = hasattr(resource, 'facets')
if not has_facets:
return True
# facets is a list
return not resource.facets or core_facet in resource.facets
def has_good_licenses(resource, min_score=MIN_GOOD_LICENSE_SCORE):
"""
Return True if all licenses of a Resource are detected with a score above min_score.
"""
if not resource.licenses:
return False
if resource.scan_errors:
return False
for detected_license in resource.licenses:
if detected_license['score'] < min_score:
return False
return True
def has_spdx_licenses(resource):
"""
Return True if all licenses of a Resource are known SPDX licenses.
"""
if resource.scan_errors:
return False
for detected_license in resource.licenses:
if not detected_license.get('spdx_license_key'):
return False
return True
def get_unique_licenses(codebase, min_score=MIN_GOOD_LICENSE_SCORE):
"""
Return a tuple of two sets of license keys found in the codebase with at least min_score:
- the set of license keys found in key files
- the set of license keys found in non-key files
This is only for files in the core facet.
"""
key_license_keys = set()
other_license_keys = set()
for resource in codebase.walk():
# FIXME: consider only text, source-like files for now
if not resource.is_file:
continue
if not (is_key_file(resource) or is_core_facet(resource)):
continue
if is_key_file(resource):
license_keys = key_license_keys
else:
license_keys = other_license_keys
for detected_license in resource.licenses:
if detected_license['score'] < min_score:
continue
license_keys.add(detected_license['key'])
return key_license_keys, other_license_keys
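# Illustrative example (not in the original): if key files only contain 'mit'
# and non-key files also only contain 'mit', the two sets returned here are
# equal and compute_license_score() treats the licensing as consistent; if the
# non-key files additionally contain 'gpl-2.0', the sets differ and the
# consistency weight is not awarded.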
def get_detected_license_keys_with_full_text(codebase, min_score=MIN_GOOD_LICENSE_SCORE):
"""
Return a set of license keys for which at least one detection includes the
full license text.
This is for any files in the core facet or not.
"""
license_keys = set()
for resource in codebase.walk():
# FIXME: consider only text, source-like files for now
if not resource.is_file:
continue
for detected_license in resource.licenses:
if detected_license['score'] < min_score:
continue
if not detected_license['matched_rule']['is_license_text']:
continue
license_keys.add(detected_license['key'])
return license_keys
def get_other_licenses_and_copyrights_counts(codebase, min_score=MIN_GOOD_LICENSE_SCORE):
"""
Return a tuple of (count of files with a license/copyright, total count of
files).
Do files that can contain licensing and copyright information reliably carry
such information? This is based on a percentage of files in the core facet
of the project that have both:
- A license statement such as a text, notice or an SPDX-License-Identifier and,
- A copyright statement in standard format.
Here "reliably" means that these are reliably detected by tool(s) with a
high level of confidence. This is a progressive element that is computed
based on:
- LICCOP: the number of files with a license notice and copyright statement
- TOT: the total number of files
"""
total_files_count = 0
files_with_good_license_and_copyright_count = 0
for resource in codebase.walk():
# FIXME: consider only text, source-like files for now
if is_key_file(resource) or not resource.is_file:
continue
if not is_core_facet(resource):
continue
total_files_count += 1
if resource.scan_errors:
continue
if not (resource.licenses or resource.copyrights):
continue
if not has_good_licenses(resource, min_score):
continue
files_with_good_license_and_copyright_count += 1
return files_with_good_license_and_copyright_count, total_files_count
|
python
|
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.common.partial_infer.utils import int64_array
from mo.front.common.replacement import FrontReplacementSubgraph
from mo.front.tf.graph_utils import create_op_node_with_second_input
from mo.graph.graph import Graph
from mo.ops.reshape import Reshape
class NonMaxSuppressionNormalize(FrontReplacementSubgraph):
"""
The transformation converts several 0D inputs of the NonMaxSuppression layer to 1D tensors of shape [1] to
comply with the layer specification.
"""
enabled = True
def find_and_replace_pattern(self, graph: Graph):
for nms in graph.get_op_nodes(op='NonMaxSuppression'):
# make inputs 2 to 5 to have shape [1] instead of [0] (convert 0D to 1D)
nms_name = nms.soft_get('name', nms.id)
for port_id in range(2, 6):
if port_id in nms.in_ports() and not nms.in_port(port_id).disconnected():
reshape_1d = create_op_node_with_second_input(graph, Reshape, int64_array([1]),
{'name': nms_name + '/Reshape_1D_{}'.format(port_id)})
nms.in_port(port_id).get_connection().insert_node(reshape_1d)
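# Illustrative sketch (assumption, not part of the transformation itself): the
# inserted Reshape turns a 0D scalar input such as max_output_boxes_per_class
# (shape []) into a 1D tensor of shape [1], e.g. a scalar value 100 becomes
# [100], which is the form ports 2..5 of the NonMaxSuppression spec expect.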
|
python
|
import os.path as osp
import pickle
from collections import Counter
import torch
from torch.utils.data import DataLoader
import spacy
from tqdm import tqdm
import lineflow as lf
import lineflow.datasets as lfds
PAD_TOKEN = '<pad>'
UNK_TOKEN = '<unk>'
START_TOKEN = '<s>'
END_TOKEN = '</s>'
IGNORE_INDEX = -100
NLP = spacy.load('en_core_web_sm',
disable=['vectors', 'textcat', 'tagger', 'ner', 'parser'])
def preprocess(x):
tokens = [token.text.lower() for token in NLP(x[0]) if not token.is_space]
return ([START_TOKEN] + tokens + [END_TOKEN], x[1])
def build_vocab(tokens, cache='vocab.pkl', max_size=50000):
if not osp.isfile(cache):
counter = Counter(tokens)
words, _ = zip(*counter.most_common(max_size))
words = [PAD_TOKEN, UNK_TOKEN] + list(words)
token_to_index = dict(zip(words, range(len(words))))
if START_TOKEN not in token_to_index:
token_to_index[START_TOKEN] = len(token_to_index)
words += [START_TOKEN]
if END_TOKEN not in token_to_index:
token_to_index[END_TOKEN] = len(token_to_index)
words += [END_TOKEN]
with open(cache, 'wb') as f:
pickle.dump((token_to_index, words), f)
else:
with open(cache, 'rb') as f:
token_to_index, words = pickle.load(f)
return token_to_index, words
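# Illustrative example (assumption, not from the original): for tokens
# ['the', 'movie', 'the'] and an empty cache, the special tokens come first, so
# token_to_index would be
# {'<pad>': 0, '<unk>': 1, 'the': 2, 'movie': 3, '<s>': 4, '</s>': 5}.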
def postprocess(token_to_index,
unk_index):
def f(x):
token_index = [token_to_index.get(token, unk_index) for token in x[0]]
return token_index, x[1]
return f
def get_collate_fn(pad_index):
def f(batch):
indices, labels = zip(*batch)
max_length = max(len(x) for x in indices)
padded = [x + [pad_index] * (max_length - len(x)) for x in indices]
return torch.LongTensor(padded), torch.LongTensor(labels)
return f
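# Illustrative example (not in the original): with pad_index 0, a batch of
# ([5, 6, 7], 1) and ([8, 9], 0) is padded to the longest sequence, yielding
# the tensors [[5, 6, 7], [8, 9, 0]] and [1, 0].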
if __name__ == '__main__':
print('Reading...')
train = lfds.Imdb('train').map(preprocess)
tokens = lf.flat_map(lambda x: x[0],
train,
lazy=True)
print('Building vocabulary...')
token_to_index, _ = build_vocab(tokens, 'vocab.pkl')
print(f'Vocab Size: {len(token_to_index)}')
pad_index = token_to_index[PAD_TOKEN]
unk_index = token_to_index[UNK_TOKEN]
loader = DataLoader(
train
.map(postprocess(token_to_index, unk_index))
.save('imdb.train.cache'),
batch_size=32,
num_workers=4,
collate_fn=get_collate_fn(pad_index))
for batch in tqdm(loader):
...
del loader
|
python
|
import unittest
from helpers.queuehelper import QueueName
#from backend.fcmapp import InfrastructureService
from backend.fcmbus import Bus
class TestBus(unittest.TestCase):
#@classmethod
#def make_bus(self):
# return Bus(InfrastructureService('', '', '', '', '', ''))
def test_bus_get_name_q(self):
#bus = Test_bus.make_bus()
self.assertEqual(Bus.get_queue_name(QueueName.Q_ALERT), "alert")
def test_bus_get_name_str(self):
#bus = Test_bus.make_bus()
self.assertTrue(Bus.get_queue_name("alert") == "alert")
if __name__ == '__main__':
unittest.main()
|
python
|
import importlib
import sys
# the following are python opcodes taken from the `opcode` module
# these have been constantized for easier access
# these are the opcodes used by python
# not to be confused with opcodes from neo.VM.OpCode,
# which are the opcodes for the neo vm
POP_TOP = 1
ROT_TWO = 2
ROT_THREE = 3
DUP_TOP = 4
DUP_TOP_TWO = 5
NOP = 9
UNARY_POSITIVE = 10
UNARY_NEGATIVE = 11
UNARY_NOT = 12
UNARY_INVERT = 15
BINARY_MATRIX_MULTIPLY = 16
INPLACE_MATRIX_MULTIPLY = 17
BINARY_POWER = 19
BINARY_MULTIPLY = 20
BINARY_MODULO = 22
BINARY_ADD = 23
BINARY_SUBTRACT = 24
BINARY_SUBSCR = 25
BINARY_FLOOR_DIVIDE = 26
BINARY_TRUE_DIVIDE = 27
INPLACE_FLOOR_DIVIDE = 28
INPLACE_TRUE_DIVIDE = 29
GET_AITER = 50
GET_ANEXT = 51
BEFORE_ASYNC_WITH = 52
INPLACE_ADD = 55
INPLACE_SUBTRACT = 56
INPLACE_MULTIPLY = 57
INPLACE_MODULO = 59
STORE_SUBSCR = 60
DELETE_SUBSCR = 61
BINARY_LSHIFT = 62
BINARY_RSHIFT = 63
BINARY_AND = 64
BINARY_XOR = 65
BINARY_OR = 66
INPLACE_POWER = 67
GET_ITER = 68
GET_YIELD_FROM_ITER = 69
PRINT_EXPR = 70
LOAD_BUILD_CLASS = 71
YIELD_FROM = 72
GET_AWAITABLE = 73
INPLACE_LSHIFT = 75
INPLACE_RSHIFT = 76
INPLACE_AND = 77
INPLACE_XOR = 78
INPLACE_OR = 79
BREAK_LOOP = 80
WITH_CLEANUP_START = 81
WITH_CLEANUP_FINISH = 82
RETURN_VALUE = 83
IMPORT_STAR = 84
YIELD_VALUE = 86
POP_BLOCK = 87
END_FINALLY = 88
POP_EXCEPT = 89
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
STORE_NAME = 90 # Index in name list
DELETE_NAME = 91 # ""
UNPACK_SEQUENCE = 92 # Number of tuple items
FOR_ITER = 93 # jrel op
UNPACK_EX = 94
STORE_ATTR = 95 # Index in name list
DELETE_ATTR = 96 # ""
STORE_GLOBAL = 97 # ""
DELETE_GLOBAL = 98 # ""
LOAD_CONST = 100 # Index in const list
LOAD_NAME = 101 # Index in name list
BUILD_TUPLE = 102 # Number of tuple items
BUILD_LIST = 103 # Number of list items
BUILD_SET = 104 # Number of set items
BUILD_MAP = 105  # Number of dict entries (up to 255)
LOAD_ATTR = 106 # Index in name list
COMPARE_OP = 107 # Comparison operator
IMPORT_NAME = 108 # Index in name list
IMPORT_FROM = 109 # Index in name list
JUMP_FORWARD = 110 # Number of bytes to skip
JUMP_IF_FALSE_OR_POP = 111 # Target byte offset from beginning of code
JUMP_IF_TRUE_OR_POP = 112 # "jabs op"
JUMP_ABSOLUTE = 113 # "jabs op"
POP_JUMP_IF_FALSE = 114 # "jabs op"
POP_JUMP_IF_TRUE = 115 # "jabs op"
LOAD_GLOBAL = 116 # Index in name list
CONTINUE_LOOP = 119 # Target address jrel
SETUP_LOOP = 120 # Distance to target address jrel
SETUP_EXCEPT = 121 # "jrel"
SETUP_FINALLY = 122 # "jrel"
LOAD_FAST = 124 # Local variable number
STORE_FAST = 125 # Local variable number
DELETE_FAST = 126 # Local variable number
RAISE_VARARGS = 130  # Number of raise arguments (1, 2, or 3)
CALL_FUNCTION = 131  # #args + (#kwargs << 8)
MAKE_FUNCTION = 132 # Number of args with default values
BUILD_SLICE = 133 # Number of items
MAKE_CLOSURE = 134
LOAD_CLOSURE = 135
LOAD_DEREF = 136
STORE_DEREF = 137
DELETE_DEREF = 138
CALL_FUNCTION_VAR = 140  # #args + (#kwargs << 8)
CALL_FUNCTION_KW = 141  # #args + (#kwargs << 8)
CALL_FUNCTION_VAR_KW = 142  # #args + (#kwargs << 8)
SETUP_WITH = 143
LIST_APPEND = 145
SET_ADD = 146
MAP_ADD = 147
LOAD_CLASSDEREF = 148
SETUP_ASYNC_WITH = 154
EXTENDED_ARG = 144
BUILD_LIST_UNPACK = 149
BUILD_MAP_UNPACK = 150
BUILD_MAP_UNPACK_WITH_CALL = 151
BUILD_TUPLE_UNPACK = 152
BUILD_SET_UNPACK = 153
# boa custom ops
FROMALTSTACK = 241
DROP = 242
BR_S = 243
SETITEM = 244
LD_ELEMENT = 245
XSWAP = 246
ROLL = 247
DROP_BODY = 248
LOAD_CLASS_ATTR = 249
DEBUG_OP = 250
# the following is a convenience function
# for a human-readable version of the ops
module = importlib.import_module('boa.code.pyop')
items = dir(sys.modules[__name__])
def to_name(op):
"""
:param op:
:return:
"""
for item in items:
n = getattr(module, item)
if op == n:
return item
return None
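# Illustrative usage (comment only, not in the original module): to_name(83)
# returns 'RETURN_VALUE' because RETURN_VALUE == 83, while an opcode value that
# is not defined above returns None.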
|
python
|
from django.contrib import admin
from testModel.models import Test, Contact, Tag
class TagInline(admin.TabularInline):
model = Tag
# Register your models here.
class ContactAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'email')
search_fields = ('name',)
inlines = [TagInline]
# fields = ("name", "email")
fieldsets = (
["Main",{
"fields":("name","email"),
}],
["Advance",{
"classes":("collapse",),
"fields":("age",),
}],
)
admin.site.register(Contact, ContactAdmin)
admin.site.register([Test, Tag])
|
python
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def buildTree(self, inorder: 'List[int]', postorder: 'List[int]') -> 'TreeNode':
if not inorder or not postorder:
return
root = TreeNode(postorder[-1])
i = 0
while inorder[i] != postorder[-1]:
i += 1
root.left = self.buildTree(inorder[:i], postorder[:i])
root.right = self.buildTree(inorder[i+1:], postorder[i:-1])
return root
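# Worked example (illustrative): inorder [9, 3, 15, 20, 7] and postorder
# [9, 15, 7, 20, 3] give root 3 (postorder[-1]); inorder splits at index 1 into
# left [9] and right [15, 20, 7], so the left subtree is the single node 9 and
# the right subtree is rebuilt from inorder [15, 20, 7] / postorder [15, 7, 20],
# producing 20 with children 15 and 7.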
# def buildTree(self, inorder: 'List[int]', postorder: 'List[int]') -> 'TreeNode':
# if not inorder or not postorder:
# return []
# root = TreeNode(postorder[-1])
# self._buildTree(root,inorder,postorder)
# return root
# def _buildTree(self, node, inorder, postorder) -> 'TreeNode':
# rootIndex_inorder = inorder.index(postorder[-1])
# lenOfLeftSubTree = rootIndex_inorder
# lenOfRightSubTree = len(inorder)-lenOfLeftSubTree-1
# if lenOfLeftSubTree > 0:
# node.left = TreeNode(postorder[lenOfLeftSubTree-1])
# self._buildTree(node.left,inorder[0:rootIndex_inorder],postorder[0:lenOfLeftSubTree])
# if lenOfRightSubTree > 0:
# node.right = TreeNode(postorder[lenOfLeftSubTree+lenOfRightSubTree-1])
# self._buildTree(node.right,inorder[rootIndex_inorder+1:],postorder[lenOfLeftSubTree:lenOfLeftSubTree+lenOfRightSubTree])
# return
|
python
|
import argparse
import os.path
import numpy as np
import torch
import torchvision
import torchvision.transforms as T
from sklearn.model_selection import train_test_split
from MIA.Attack.Augmentation import Augmentation
from model import CIFAR
parser = argparse.ArgumentParser()
parser.add_argument("--save_to", default='models', type=str)
parser.add_argument("--name", default='cifar10', type=str)
if __name__ == "__main__":
args = parser.parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
target = CIFAR(10)
target.to(device)
target.load_state_dict(torch.load(os.path.join(args.save_to, args.name + ".pth")))
train = torchvision.datasets.CIFAR10(root='../data', train=True,
download=True)
test = torchvision.datasets.CIFAR10(root='../data', train=False,
download=True)
X, Y = np.concatenate((train.data, test.data)), np.concatenate((train.targets, test.targets)).astype(np.int64)
target_X, shadow_X, target_Y, shadow_Y = train_test_split(X, Y, test_size=0.5, random_state=42)
transform = T.Compose(
[T.ToTensor(),
T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trans = [T.RandomRotation(10)]
times = [5 for _ in range(len(trans))]
attack_model = Augmentation(device, trans, times, transform=transform)
attack_model.evaluate(target, *train_test_split(target_X, target_Y, test_size=0.7, random_state=42), show=True)
# membership = attack_model(target, target_X, target_Y)
|
python
|
"""TrackML scoring metric"""
__authors__ = ['Sabrina Amrouche', 'David Rousseau', 'Moritz Kiehn',
'Ilija Vukotic']
import numpy
import pandas
def _analyze_tracks(truth, submission):
"""Compute the majority particle, hit counts, and weight for each track.
Parameters
----------
truth : pandas.DataFrame
Truth information. Must have hit_id, particle_id, and weight columns.
submission : pandas.DataFrame
Proposed hit/track association. Must have hit_id and track_id columns.
Returns
-------
pandas.DataFrame
Contains track_id, nhits, major_particle_id, major_particle_nhits,
major_nhits, and major_weight columns.
"""
# true number of hits for each particle_id
particles_nhits = truth['particle_id'].value_counts(sort=False)
total_weight = truth['weight'].sum()
# combined event with minimal reconstructed and truth information
event = pandas.merge(truth[['hit_id', 'particle_id', 'weight']],
submission[['hit_id', 'track_id']],
on=['hit_id'], how='left', validate='one_to_one')
event.drop('hit_id', axis=1, inplace=True)
event.sort_values(by=['track_id', 'particle_id'], inplace=True)
# ASSUMPTIONs: 0 <= track_id, 0 <= particle_id
tracks = []
# running sum for the reconstructed track we are currently in
rec_track_id = -1
rec_nhits = 0
# running sum for the particle we are currently in (in this track_id)
cur_particle_id = -1
cur_nhits = 0
cur_weight = 0
# majority particle with most hits up to now (in this track_id)
maj_particle_id = -1
maj_nhits = 0
maj_weight = 0
for hit in event.itertuples(index=False):
# we reached the next track so we need to finish the current one
if (rec_track_id != -1) and (rec_track_id != hit.track_id):
# could be that the current particle is the majority one
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for this track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits,
maj_weight / total_weight))
# setup running values for next track (or first)
if rec_track_id != hit.track_id:
rec_track_id = hit.track_id
rec_nhits = 1
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
maj_particle_id = -1
maj_nhits = 0
maj_weight = 0
continue
# hit is part of the current reconstructed track
rec_nhits += 1
# reached new particle within the same reconstructed track
if cur_particle_id != hit.particle_id:
# check if last particle has more hits than the majority one
# if yes, set the last particle as the new majority particle
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# reset running values for current particle
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
# hit belongs to the same particle within the same reconstructed track
else:
cur_nhits += 1
cur_weight += hit.weight
# last track is not handled inside the loop
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for the last track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits, maj_weight / total_weight))
cols = ['track_id', 'nhits',
'major_particle_id', 'major_particle_nhits',
'major_nhits', 'major_weight']
return pandas.DataFrame.from_records(tracks, columns=cols)
def score_event(truth, submission):
"""Compute the TrackML event score for a single event.
Parameters
----------
truth : pandas.DataFrame
Truth information. Must have hit_id, particle_id, and weight columns.
submission : pandas.DataFrame
Proposed hit/track association. Must have hit_id and track_id columns.
"""
tracks = _analyze_tracks(truth, submission)
purity_rec = numpy.true_divide(tracks['major_nhits'], tracks['nhits'])
purity_maj = numpy.true_divide(tracks['major_nhits'], tracks['major_particle_nhits'])
good_track = (0.5 < purity_rec) & (0.5 < purity_maj)
return tracks['major_weight'][good_track].sum()
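# Worked example (illustrative, not from the original library): a reconstructed
# track with 8 hits, 6 of which belong to its majority particle that has 10 true
# hits, gives purity_rec = 6/8 = 0.75 and purity_maj = 6/10 = 0.6; both exceed
# 0.5, so the track is "good" and its major_weight contributes to the score.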
|
python
|
from crc.api.common import ApiError
from crc.scripts.script import Script
from crc.services.study_service import StudyService
class UpdateStudyAssociates(Script):
argument_error_message = "You must supply at least one argument to the " \
"update_study_associates task, an array of objects in the form " \
"{'uid':'someid', 'role': 'text', 'send_email: 'boolean', " \
"'access':'boolean'} "
def get_description(self):
return """Allows you to associate other users with a study - only 'uid' is required in the
incoming dictionary, but will be useless without other information - all values will default to
false or blank.
An empty list will delete the existing associates list (except the owner).
Each UID will be validated against LDAP and an error will be raised if the uva_uid is not found. The supplied
list will replace any associations already in place.
example : update_study_associates([{'uid':'sbp3ey','role':'Unicorn Herder', 'send_email': False, 'access':True}])
"""
def validate_arg(self, arg):
    if not isinstance(arg, list):
        raise ApiError("invalid parameter", "This function is expecting a list of dictionaries")
    if len(arg) > 0 and not isinstance(arg[0], dict):
        raise ApiError("invalid parameter", "This function is expecting a list of dictionaries")
def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
if len(args) == 0:
items = []
else:
items = args[0]
self.validate_arg(items)
return all([x.get('uid', False) for x in items])
def do_task(self, task, study_id, workflow_id, *args, **kwargs):
if len(args) == 0:
access_list = []
else:
access_list = args[0]
self.validate_arg(access_list)
return StudyService.update_study_associates(study_id, access_list)
|
python
|
from django.shortcuts import render, redirect
from .models import *
from django.http import Http404
from django.contrib.auth.models import User
from rest_framework import viewsets
from .sheet2 import interest_responses, firstapplication_response
# from .sheet3 import assesment_responses, score_response
# from django.contrib.auth.models import User
# from django.shortcuts import render
# from .filters import UserFilter
# Create your views here.
#class Profileview(viewsets.ModelViewSet):
#queryset= Profile.objects.all()
#serializer_class = ProfileSerializer
def homepage(request):
'''
assuming we make the api call
'''
form_data=interest_responses()
response = firstapplication_response()
for email in interestModel.objects.values_list('email', flat=True).distinct():
interestModel.objects.filter(pk__in= interestModel.objects.filter(email=email).values_list('id', flat=True)[1:]).delete()
res= interestModel.objects.all()
return render(request,'interest.html',{'data':res})
# def scorecard(request):
# '''
# Assuming we make the api call
# '''
# # form_data=assesment_responses()
# form_data=assesment_responses()
# response = score_response()
# for email in scoreModel.objects.values_list('email', flat=True).distinct():
# scoreModel.objects.filter(pk__in= scoreModel.objects.filter(email=email).values_list('id', flat=True)[1:]).delete()
# res= scoreModel.objects.all()
# return render(request,'scores.html',{'data':res})
# def search(request):
# user_list = User.objects.all()
# user_filter = UserFilter(request.GET, queryset=user_list)
# return render(request, 'search/user_list.html', {'filter': user_filter})
|
python
|
"""The qnap component."""
|
python
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
def test_nginx_service(host):
assert host.service("nginx-podman").is_running
assert host.service("nginx-podman").is_enabled
def test_nginx_listening(host):
assert host.socket("tcp://0.0.0.0:80").is_listening
def test_serve_static_page(host):
assert host.check_output("curl http://localhost") == "Hello World"
|
python
|
import unittest
from typing import List, Optional
from swift_cloud_py.common.errors import SafetyViolation
from swift_cloud_py.entities.intersection.intersection import Intersection
from swift_cloud_py.entities.intersection.traffic_light import TrafficLight
from swift_cloud_py.entities.intersection.signalgroup import SignalGroup
from swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule, GreenYellowInterval
from swift_cloud_py.entities.intersection.sg_relations import Conflict
from swift_cloud_py.validate_safety_restrictions.validate_completeness import validate_completeness
class TestValidatingCompleteness(unittest.TestCase):
""" Unittests of the function find_other_sg_relation_matches """
@staticmethod
def get_default_signalgroup(name: str, min_greenyellow: float = 10.0, max_greenyellow: float = 80.0,
min_red: float = 10.0, max_red: float = 80.0) -> SignalGroup:
""" Get a default signalgroup object"""
traffic_light = TrafficLight(capacity=0.5, lost_time=0.0)
return SignalGroup(id=name, traffic_lights=[traffic_light],
min_greenyellow=min_greenyellow, max_greenyellow=max_greenyellow, min_red=min_red,
max_red=max_red, min_nr=1, max_nr=3)
@staticmethod
def get_default_intersection(additional_signalgroups: Optional[List[SignalGroup]] = None
) -> Intersection:
"""
Get a default intersection object with 2 conflicting signal groups "sg1" and "sg2"
:param additional_signalgroups: signal groups to add to the intersection (besides signal groups 'sg1' and 'sg2')
:return: the intersection object
"""
if additional_signalgroups is None:
additional_signalgroups = []
signalgroup1 = TestValidatingCompleteness.get_default_signalgroup(name="sg1")
signalgroup2 = TestValidatingCompleteness.get_default_signalgroup(name="sg2")
conflict = Conflict(id1="sg1", id2="sg2", setup12=2, setup21=3)
intersection = Intersection(signalgroups=[signalgroup1, signalgroup2] + additional_signalgroups,
conflicts=[conflict])
return intersection
def test_complete(self) -> None:
# GIVEN
fts = FixedTimeSchedule(greenyellow_intervals=dict(
sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],
sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]),
period=100)
intersection = TestValidatingCompleteness.get_default_intersection()
# WHEN
validate_completeness(intersection=intersection, fts=fts)
# THEN no error should be raised
def test_signalgroup_missing(self) -> None:
# GIVEN
fts = FixedTimeSchedule(greenyellow_intervals=dict(
sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],
sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]),
period=100)
signalgroup3 = TestValidatingCompleteness.get_default_signalgroup(name="sg3")
intersection = TestValidatingCompleteness.get_default_intersection(additional_signalgroups=[signalgroup3])
with self.assertRaises(SafetyViolation):
# WHEN
validate_completeness(intersection=intersection, fts=fts)
# THEN a SafetyViolation should be raised
def test_no_greenyellow_intervals(self) -> None:
# GIVEN
fts = FixedTimeSchedule(greenyellow_intervals=dict(
sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],
sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)],
sg3=[]),
period=100)
signalgroup3 = TestValidatingCompleteness.get_default_signalgroup(name="sg3")
intersection = TestValidatingCompleteness.get_default_intersection(additional_signalgroups=[signalgroup3])
with self.assertRaises(SafetyViolation):
# WHEN
validate_completeness(intersection=intersection, fts=fts)
# THEN a SafetyViolation should be raised
|
python
|
import cv2
from PIL import Image
import argparse
import os
import glob
import time
from pathlib import Path
import torch
from config import get_config
from mtcnn import MTCNN
import mxnet as mx
import numpy as np
from Learner import face_learner
from utils import load_facebank, draw_box_name, prepare_facebank
from face_detection.accuracy_evaluation import predict
from face_detection.config_farm import configuration_10_320_20L_5scales_v2 as cfg
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='for face verification')
parser.add_argument("-f", "--folder", help="folder of test images",default='./', type=str)
parser.add_argument("--extension", help="image extension",default='jpg', type=str)
parser.add_argument("-s", "--save_name", help="output file name",default='recording', type=str)
parser.add_argument('-th','--threshold',help='threshold to decide identical faces',default=1.54, type=float)
parser.add_argument("-u", "--update", help="whether perform update the facebank",action="store_true")
parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true")
parser.add_argument("-c", "--score", help="whether show the confidence score",action="store_true")
parser.add_argument("-b", "--begin", help="from when to start detection(in seconds)", default=0, type=int)
parser.add_argument("-d", "--duration", help="perform detection for how long(in seconds)", default=0, type=int)
parser.add_argument("-w", "--weight", help="model path", default='', type=str)
args = parser.parse_args()
conf = get_config(False)
mtcnn = MTCNN()
print('mtcnn loaded')
learner = face_learner(conf, True)
learner.threshold = args.threshold
if conf.device.type == 'cpu':
learner.load_state(conf, 'cpu_final.pth', True, True)
else:
# learner.load_state(conf, 'mobilefacenet.pth', True, True)
learner.load_state(conf, 'ir_se50.pth', True, True, weight=args.weight)
learner.model.eval()
print('learner loaded')
if args.update:
targets, names = prepare_facebank(conf, learner.model, mtcnn, tta = args.tta)
print('facebank updated')
else:
targets, names = load_facebank(conf)
print('facebank loaded')
# cap = cv2.VideoCapture(str(conf.facebank_path/args.file_name))
# cap.set(cv2.CAP_PROP_POS_MSEC, args.begin * 1000)
# fps = cap.get(cv2.CAP_PROP_FPS)
fps = 30
# os.chdir(args.folder)
video_writer = cv2.VideoWriter(str(conf.facebank_path/'{}.avi'.format(args.save_name)),
cv2.VideoWriter_fourcc(*'XVID'), int(fps), (1280,720))
if args.duration != 0:
i = 0
symbol_file_path = 'face_detection/symbol_farm/symbol_10_320_20L_5scales_v2_deploy.json'
model_file_path = 'face_detection/saved_model/configuration_10_320_20L_5scales_v2/train_10_320_20L_5scales_v2_iter_1800000.params'
# self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
# print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
ctx = mx.gpu(0)
face_detector = predict.Predict(mxnet=mx,
symbol_file_path=symbol_file_path,
model_file_path=model_file_path,
ctx=ctx,
receptive_field_list=cfg.param_receptive_field_list,
receptive_field_stride=cfg.param_receptive_field_stride,
bbox_small_list=cfg.param_bbox_small_list,
bbox_large_list=cfg.param_bbox_large_list,
receptive_field_center_start=cfg.param_receptive_field_center_start,
num_output_scales=cfg.param_num_output_scales)
for file in glob.glob(args.folder + "/*.{}".format(args.extension)):
print(file)
frame = cv2.imread(file)
image = Image.fromarray(frame[...,::-1]) #bgr to rgb
# image = Image.fromarray(frame)
try:
# bboxes, faces = mtcnn.align_multi(image, conf.face_limit, 16)
# print(faces[0].size)
# backSub = cv2.createBackgroundSubtractorMOG2()
# backSub = cv2.createBackgroundSubtractorKNN()
# test = cv2.resize(frame, dsize=None ,fx=0.25, fy=0.25)
# fgMask = backSub.apply(test)
# cv2.imshow('window_test', test)
# cv2.imshow('window', fgMask)
# if cv2.waitKey(0) == ord('q'):
# break
faces, infer_time = face_detector.predict(frame, resize_scale=0.2, score_threshold=0.6, top_k=10000, \
NMS_threshold=0.2, NMS_flag=True, skip_scale_branch_list=[])
print(len(faces))
bboxes = faces
except Exception as e:
print(e)
bboxes = []
faces = []
if len(bboxes) == 0:
print('no face')
continue
else:
# bboxes = bboxes[:,:-1] #shape:[10,4],only keep 10 highest possibiity faces
# bboxes = bboxes.astype(int)
# bboxes = bboxes + [-1,-1,1,1] # personal choice
img_size = 112
margin = 0
# faces = np.empty((len(bboxes), img_size, img_size, 3))
faces = []
img_h, img_w, _ = np.shape(image)
for i, bbox in enumerate(bboxes):
x1, y1, x2, y2= bbox[0], bbox[1], bbox[2] ,bbox[3]
xw1 = max(int(x1 - margin ), 0)
yw1 = max(int(y1 - margin ), 0)
xw2 = min(int(x2 + margin ), img_w - 1)
yw2 = min(int(y2 + margin ), img_h - 1)
face = cv2.resize(frame[yw1:yw2 + 1, xw1:xw2 + 1], (img_size, img_size))
faces.append(Image.fromarray(face[...,::-1]))
start_time = time.time()
results, score = learner.infer(conf, faces, targets, True)
print('Duration: {}'.format(time.time()-start_time))
for idx,bbox in enumerate(bboxes):
x1, y1, x2, y2= bbox[0], bbox[1], bbox[2] ,bbox[3]
xw1 = max(int(x1 - margin ), 0)
yw1 = max(int(y1 - margin ), 0)
xw2 = min(int(x2 + margin ), img_w - 1)
yw2 = min(int(y2 + margin ), img_h - 1)
bbox = [xw1, yw1, xw2,yw2]
if args.score:
frame = draw_box_name(bbox, names[results[idx] + 1] + '_{:.2f}'.format(score[idx]), frame)
else:
frame = draw_box_name(bbox, names[results[idx] + 1], frame)
frame = cv2.resize(frame, dsize=None ,fx=0.25, fy=0.25)
cv2.imshow('window', frame)
if cv2.waitKey(0) == ord('q'):
break
video_writer.write(frame)
# if args.duration != 0:
# i += 1
# if i % 25 == 0:
# print('{} second'.format(i // 25))
# if i > 25 * args.duration:
# break
# cap.release()
video_writer.release()
|
python
|
class PixelNotChangingError(Exception):
pass
|
python
|
__author__ = 'kim'
try:
import pyfftw
print('-------------------')
print('| pyFFTW detected |')
print('-------------------')
except ImportError:
print('-------------------------------')
print('* WARNING: No pyFFTW detected *')
print('-------------------------------')
from upsilon.utils import utils
from upsilon.utils.logger import Logger
from upsilon.extract_features.extract_features import ExtractFeatures
from upsilon.extract_features.is_period_alias import is_period_alias as IsPeriodAlias
from upsilon.extract_features.feature_set import get_feature_set
from upsilon.datasets.base import load_rf_model
from upsilon.predict.predict import predict
from upsilon.test.predict import run as test_predict
|
python
|